Add Ansible Script

ByeonJungHun
2023-12-19 13:36:16 +09:00
parent 0273450ff6
commit 05cb8d9269
2610 changed files with 281893 additions and 0 deletions

1
ansible/01_old/README.md Normal file

@@ -0,0 +1 @@
# ansible_script

20
ansible/01_old/all_host Executable file

@@ -0,0 +1,20 @@
[release]
10.10.43.100
10.10.43.101
[dsk_dev]
10.10.43.[110:200]
[cmoa]
10.10.43.[201:253]
[ubuntu]
13.125.123.49 ansible_user=ubuntu
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=havelight
[dev2:children]
release
dsk_dev
cmoa
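
The bracketed ranges are Ansible numeric host patterns; 10.10.43.[110:200] expands to 91 hosts. A quick sanity check of the expansion without contacting any host (a minimal sketch, assuming ansible-core is installed):

# list the hosts each group expands to
ansible dsk_dev -i all_host --list-hosts
ansible-inventory -i all_host --graph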

10
ansible/01_old/ansible.cfg Executable file

@@ -0,0 +1,10 @@
[defaults]
inventory = inventory
roles_path = roles
deprecation_warnings = False
display_skipped_hosts = no
ansible_home = .
stdout_callback = debug
host_key_checking = False
#private_key_file=/root/.ssh/dev2-iac
#remote_tmp = /tmp/.ansible/tmp


@@ -0,0 +1,26 @@
# Start from the AWX execution environment base image
FROM nexus2.exem-oss.org/awx-ee:latest
USER root
# Update and install dependencies
RUN yum clean all && \
yum makecache && \
yum update -y && \
yum install -y sudo nfs-utils
# Python package management
RUN pip3 uninstall -y crypto pycrypto && \
pip3 install pycryptodome hvac xlwt
# Install kubectl
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
chmod +x kubectl && \
mv kubectl /usr/local/bin/
# Copy kubeconfig (Not recommended for production)
COPY kubeconfig /root/.kube/config
# Keep the container running
#CMD ["bash"]
CMD ["tail", "-f", "/dev/null"]


@@ -0,0 +1,23 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXlPREF4TURJeU1Wb1hEVE15TVRJeU5UQXhNREl5TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT3drCldCR0hqaHRQQUZCVHNBNWJUMzIrNDl2WW4vbkZkd1h5MW5GNlBEYmUzL0ZMYzJDZGE2OENCSk95eTh2L2VvUW4KWGVleXRua05MTTdxaGFrbCtNUnBucFU2Wnp1NGdkdlJML1VHWDd3dTYwNFMvdkpQMXVNcFhMWEZEYUQzbFI2aQpEVm9jcUFmMGZsNFZDU21ldkJnTHpQOFl1cElrbllhc0FaRzJZbGQ2K1Y2RFNwWTA3aFBjQWNYdXo3c24vS202CjNqb2M0TnA1ZlFBSXVoZjVGdXA0UkE2YXAzWHZ3NllMNWExWFVUNzdjY1I3S0JHcDdtRFl3OVN1RloyVHFuOXIKSDUrQll0YytXdjlHNUZLOHcxVFAzYUFGYTJDMnhQYy9FWGxVa1UxOUsveE1ocDdiTm12eWlRd3VBbVI0NjJSKwo4aXBYNXFlam5hTnhITVVoeU5jQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZNajA1VFJsa0w5SlZlNUdTS05UOThlbzVqOE1NQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTlA4aWFIZWh4a3RkTStSc3FJMwplU1VoUEFkdW5TSm5MdTJLRFZCYUgxeHVLQkNBbUUzNjBFbk1FUmFLQjNKWUVuVE9aeWR1Y3Z1amg0bVdVaGQrCjA2RVl0aDRYVW1sSytsVTJkWHdia2s4UGRrZnVrRE5ZOG8vV20vc05oSlF4VTlFMHhZSGwwSDgwVlQ1Mk5CR1oKSkRnUDREaUVibzluajBhaVJkaDFYMmROZkh5Vzl0VGZPM210OGVtUldSVVV1Ly91anNsMTJ1VjZNRjFTVmlRLwpCaU1iODMvVit5aHVWa09HVDk5c25BMTNCTkF1WVFDeUpaK3ZRdm5jdTdCbW5sMXdtRjhVTVNjYTFUMEZiOVR1CjFkNnVqWTM2U1pjbXlHSHU5SW1lb2FoYkdQMmNFNkI0YnlrNDA5UGFwbnl3dGRUNC9hREU5aGlQVGZMNkc0ZXEKMmRJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://10.10.43.240:6443
  name: saas-mgmt
contexts:
- context:
    cluster: saas-mgmt
    user: saas-mgmt
  name: saas-mgmt
current-context: saas-mgmt
kind: Config
preferences: {}
users:
- name: k8s-prod.datasaker.io
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lSQU1HZExUZ0psRzFLVXpoR1kzT2RIMVV3RFFZSktvWklodmNOQVFFTEJRQXcKR0RFV01CUUdBMVVFQXhNTmEzVmlaWEp1WlhSbGN5MWpZVEFlRncweU16QTRNak13TmpJME16ZGFGdzB5TXpBNApNall3TURJME16ZGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3hyCmRXSmxZMlpuTFhKdmIzUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEVEFrVXEKeUp6N1ZYak5uY0NRUy9qK29YdTZNWUtGSUxDYjNvUFVDelhSbzZsL3VXQy9hL1R2SmxHWmNJNFRzQURKdGdOYwovZEM4YlJGdUNaVmEwNGljMWY2U2VoSVZhVGYwbHh6SHZOQjcwSUQ0ajB1Z2JVeCtDS0pXNkF3Wk9jU0t4Yk5yCi9VNThZeFVjVVIxa2NhYTNmeU8zTmJNNGpENkE2TVQySFZCS3p4Wk4vek5HdjZCUy9RYjROVVFjMFprUDUvb0cKc1ptL3dtWVJZb1dyTld2NXVzWVFnS25HMUNyTVdNMFgySGtDWkF1cXpHUU9CMWpvK2RZWVQvWnhab2lIZEx3ZgpaWjIwb2paOHQ1MnBMZ1VlbTJsQ2xraXJHdEFKRzI2MGluVDNLMTBBakdSMEE5ZVlJNzd5VEQrUHZ2c2tTUHVkCnIxb1FqRDRWYUZIMG1IWTFBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJSGdEQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkVyeVN4QzRVNlNWTUtmVQpaWFBqNnlhSE9iSDlNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUURES3BreklUeFlEUFBjK1pabkJaZjdOV2wvCjVTRmRRVU9IRmZ6K1ZSMjNOdnBra0VQaVcrWXNiU2dFQUcvbldVK3gvVFJqN3JORDg2cGlmODNJYXN6WjlpV3gKK1ZYRE9rekQrcG5qWXlVa2t6WW8vM1dZQklRaUdlNTRBNjlUY2VEYjV0a3J6RkFxd3JhUXI4VFJ6VVVaNzVDVQp6dmFqMkRZcUhZN1dkRlJTZUhqcm9EVHB2d1BXSjU2YjI1d0NndGdHV29uM2ZBWERjV1Z2ZzJsUnZMOHI0ZmpCCkNSRSswNjdwSGkvc2VpNU1QMFI5WkZGbjRrbm1peHE0TW1tcjd6UGZ6ZFhwS1dDOHRZKzZOemszMzdnZFhZNnYKbjVNOW1VYjk5ZngwL0J5ZGNQeGVqeXVZV3I2Y01DKzBQOE9uU1BSUjM1MS8xYVFaQXJLWnU3TmYySFVUCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMHdKRktzaWMrMVY0elozQWtFdjQvcUY3dWpHQ2hTQ3dtOTZEMUFzMTBhT3BmN2xnCnYydjA3eVpSbVhDT0U3QUF5YllEWFAzUXZHMFJiZ21WV3RPSW5OWCtrbm9TRldrMzlKY2N4N3pRZTlDQStJOUwKb0cxTWZnaWlWdWdNR1RuRWlzV3phLzFPZkdNVkhGRWRaSEdtdDM4anR6V3pPSXcrZ09qRTloMVFTczhXVGY4egpScitnVXYwRytEVkVITkdaRCtmNkJyR1p2OEptRVdLRnF6VnIrYnJHRUlDcHh0UXF6RmpORjloNUFtUUxxc3hrCkRnZFk2UG5XR0UvMmNXYUloM1M4SDJXZHRLSTJmTGVkcVM0RkhwdHBRcFpJcXhyUUNSdHV0SXAwOXl0ZEFJeGsKZEFQWG1DTys4a3cvajc3N0pFajduYTlhRUl3K0ZXaFI5SmgyTlFJREFRQUJBb0lCQVFERnp1SUNhcEJuT01nSAprWFFja1d2NVlHNjVySUlieFBwckZGem00aDl3eUlrME9CZFBPNmdnclA1ZjVsajZjY3M3VFFxNEdTU2VEMjBBCmg3Rmd0TjdqaitTWGNpSVR1bEIvVlUzZ25NdWcxbVNoSHN3WnQzeTJ4ZWRScXpUMFRPaEg0M0FBc3pUcGZJVWsKeDVIVFFJdTJoMVIzQXJ0aExtL0ZydkE5ZkZ0eDFCM3d3TUtEdmtObDN2bU82TnMxY3J3cjJmOUw1TTNJUVJXQwpRbVNFOGFSUkk4Rnhob2FKb3JRY3U0VFpocDBGUzYrVkR3bG9OWkRGZ0M4Uk1YSWd0ZXZkVnpMdGxQUUVSUTA3CmhkdFZMcklGYktQNGtRaVk1emlUYlhKOXdFSFVLNWF6ekRVNnVNU3RZc3ZveVVpR3A4REozeWFPM0RwNS93MkYKaTFRcE1oRWRBb0dCQU5tT2NWL0tVODdiQ1IydnN2eU1DOVVZNS8wdWpZZU1ndk00dC9OWXlYUmRwVFYrSXk5aApBbGNmZHVnZUN4YlpPV2IzZVIwdE9uMTNGNlNYbnZodkRaZEZzT0w5SU0rd05ZZVJoUGFyZmVQb2JDQVVLZmVJCitTazllVUovNDVlckhkUWhyMlY1aXdwQXhIUXBza0ZWd3U1OHFQbzdLalFMTG1MeDFISkhMZDFIQW9HQkFQaEwKb3hqTXdFSmZudkZjLy9YUGJRSXgwWndiVUltQWFSYlIwUDIwZU1FWFhuNjZZQ1VIeDl4YVpIVHJqY2N1eXdiVgo1QjJHYThHMzVDN0oydldvZWVaamU2c2xJTE5UMm1jNG4wSWFRSFR1a2ZPWHpKQUhRSEZXQWI2TnZJTlpaK3hGCkJ1YndwWHRYUDlvVFh2MG41MEpTUzgyOW1LWG55c25RbDBNNzk5NmpBb0dBVHE1YmhuOVFMQ0cvZkVNTkFwVkwKdWpnVnZ0VlZUazZkRllYUDBXeXMveTdYRHkrZFhnZEJwMnl6dm1NUE02WkFRbU1DSkhFMUZDYzhIOFRPTTU5RwpWUTFaV2Q2ZVBUN0hQVTU5dmhCcnFUOW55M28vYTB6WWYvZkJvVEZMaUpEVWF1SDc0MEUvN2VkYXBZQm0vWVljCng4L0I5UzNzcDRIYnR1RXJLbUZmendVQ2dZRUE0SDdLMVdaelFzL2dEczBPWkxzS0RaenJyMkNHL2Z2TGlLVm0KZDYxUUxRMnJFNXdCeUJsejNFa2lZUkNGWFIxeTFoaFlLMVhaWWdxWlZyQ050K1YvYWc1eXgzaEhTN3k2VU8vQwpGdXRUY2lZdWNuZkNya3JRT21rUUpMRlVTOUp2Z3hHYVB2NUFNUGZmTkphbElQR09SOG5PM2hQWnk4OTY2K1FjCmo5N05xMDhDZ1lCSWFXb3ZaVDMrOE5hR1hSQTRBaU1RU0MweFNCYlhacitCSHZ4TVlUQ1loMlJKZTVTM0FzNGUKc2Z0d0I2c0MvYXI2bkppOFVpN05VdysrTUk0VUZORE05c0JWUXp3NUUwNlFCSE1TcGxQdXRCRGVXTE5RN0FDNwpkM2Q5bHE4RjhXU3o0c0JiVlZvUjdheUJpU1F2blU1Q3NOSXFBb2ExYUllTzNhdVcyQWd0OEE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
- name: saas-mgmt
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJSFVDZ2pMMXhPRHd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpFeU1qZ3dNVEF5TWpGYUZ3MHlOREF4TURNd016QXhNalJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXBGWVNKT1YrM3QrbWVOQ2sKUnNMdWVyMmFSbm5qRHBvYTlBTkh5TWZ4WWRmYTNRTS9VZGlPUlEraWFTMU1jZjFPUFVzRlloOU1iS2p6S0tCTApGYTh2NkJ0dGN2bVIwTFRLcTY2MHFIUWZOMVUxUEhTUnhROElQTzg5bUhjYkI1Z2N3MktRRWNIWVNsaURBVHM3CllkK2JqMWtabWIrUVIxQlhlZlVoZ0pvYk1UazNrdERBMG1jRlVHSExKakxjUTF2UlNiR2s0NVJRY2d5SlBjdy8KM0VPTXkzcHRzeUpldk5tb3c2N0pmVXp3d1NBM0hOd0paTzB0enVETGprRTNMOXgwVC9idGVXTlI0eUEvTFp2QQppRGkrdmVnMFBJL2FYbHBvejVOVjNwdVQwOGJnRVVWZ0dUdWhRQWtxZkk1SmpjYXBLSXpLRzBMVWdxWXRjQWJRClE3ZDcvd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUSTlPVTBaWkMvU1ZYdVJraWpVL2ZIcU9ZLwpEREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBNGluVHEzU1krcXpTczlGYWwrMXdRNlNWSVJXOEw5anB6NGdsCnNCZGpZV05DbzRzTURTNG95bEh5ZTVhMXZZR3ZqdXBORjUyVG5sL2UzU3U0ekxWTHZNMXd4VHR3aUVqNjlOTGcKR2RwemtuUk5INTM0UHFhaXdxNk9xOWd5MXRIQmIyTHNadlFRN0FBanFKVG5SL01NNVI4cEllSnpFdk5xSHVMdAovWndXRWg2enJXVkVON3IxWVN6elpCRVgxaGh5dE5abUltTkhQNVFPcmxueXZLc1luV0hLL0JTTHNHK2dQc0htCkJzQkhtblNBeTh2Wk5OSWJZQk5SMmJ6WkZiVWExaTZ4aHlxRUNvdEpXMHVvOWlSN0tpbGFnelNJN1hNdUdhZFkKYkFvcHVZWDJTTkROMnhiZWJkOWhxRkRRcHZwUTlXSE00ZTBpNjhjOUgvOTA2MS9hQnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcEZZU0pPViszdCttZU5Da1JzTHVlcjJhUm5uakRwb2E5QU5IeU1meFlkZmEzUU0vClVkaU9SUStpYVMxTWNmMU9QVXNGWWg5TWJLanpLS0JMRmE4djZCdHRjdm1SMExUS3E2NjBxSFFmTjFVMVBIU1IKeFE4SVBPODltSGNiQjVnY3cyS1FFY0hZU2xpREFUczdZZCtiajFrWm1iK1FSMUJYZWZVaGdKb2JNVGsza3REQQowbWNGVUdITEpqTGNRMXZSU2JHazQ1UlFjZ3lKUGN3LzNFT015M3B0c3lKZXZObW93NjdKZlV6d3dTQTNITndKClpPMHR6dURMamtFM0w5eDBUL2J0ZVdOUjR5QS9MWnZBaURpK3ZlZzBQSS9hWGxwb3o1TlYzcHVUMDhiZ0VVVmcKR1R1aFFBa3FmSTVKamNhcEtJektHMExVZ3FZdGNBYlFRN2Q3L3dJREFRQUJBb0lCQUJpd2JhMXBaVFFxdWFIeApCcDB5OEEwMHF4Ym5mUHRXbjdJRlJDV2dGRjIweGticUUvdEI0NjN3ZVYvLzFEcFQ2Z3MvV0NHenZoR2RHRnNFCktnT3AvREtNM0ZhbnRBWjlBdTNrSTNRamJnVXNJZ0ZoS2YxSEV0L0V1YVpNVHAxSGR4ckxsZ1YwNy8vTGFITW8KNlBUOVdTdWlJVHgrRVRrRmt2N1pteHp0Q2lUTXpCQTJ4YW9paWk4dEs1NkM4K1JSeW5wMVlhSnl0VkQra3diZwpwZmErVThlUFEwRXUyeFkzVVRkdHZLSDYrSVVHdUppMVI3Nk1qM3lZRFl3NkJmN2lzMU1tSXhKM3FpdjVFRFhZCjdDSXR2VmxyQTBWczJrREpHVXdYZGhQdHU2RmUzWXc1eFNaRnR2cXkyVnpXcEhvcVB0RG16cy85ZXpnbTExeUUKWDNmR00yRUNnWUVBd0VZSGd2cmxWUXU3RW44RUlQZFVsMnZOQnBueWxZNzFWTnY5Q0dpLzNyKzhQcFNKenBiRQp0azVoTTNmeHJ4Q0s3SGI4NXBKSDl2dzh2akFwb1VmeDVIYnJESkpkS1F4Rlp5RStSRnpHVG1KN0M2VERQanVaCjl1SVhFRkxQd3RndFJqZ2hhUk1MSm16SGlJVkQ0cmpjZHhRTlN3ZWc3ZEhOOFQ5UVVHajhiUmtDZ1lFQTJzMmYKVlFLMnhlaHJLT0VMYVo5a3pvZlYyUUEvN0RFSzdvOHBuOGZhSk1SRVJlSmdTb3MyZ0p2cU5mdVhNMVp1K2crLwpuSno0L0kxSW9aQUdLTllxZHlhLzVrdDRjTkZjMTFrK3MzaE91Z01lbnBKSXNUU2EyS1o4RHl0Sm5pWTZUOFZMClBlWnlSNy9xb2p2dWUyY3NvSTFPUWxHcGFPa09WQ0NaZTc0V1BOY0NnWUVBdHc3MWYrTFlEVXlKNDJaQ3pCQXUKM2F1cEhDdmVKajVoblZXRlowZ3p4U1BQV2RXYURyTEV2cjJKRmJPUXl4aDQ3QUd0YnVpKzA0djdXU2dKdXFBQQowWC9XOGJVNE5TaVZ1MGFQUGc4R1R3SzhHNjNXcFoyaFRNaWRKTkZ6TlJNVXA5SXhIUlVnZklqOHdDSUJMQTdNCitDS0ROWGdoNDhyb3hGTi9aODlNNWFFQ2dZQkFsZXVQTzJMYUhsWHJWaXA1UGd5U2pqUUlmdk5mYzhhSFRvajUKMmhOQlFSSHFFdjFiWTZadDVoZ0hZVUZyYlBzTEl6VHJOTWFtUGNvUHJxU3l6eXp2eU9kaVFpckdHbmF1Tm5DMApwekdONUxmWUZOUVNRclhtZDVZdElCajE3dERObFM0MWtsMXZZbTRPLzJQUTEwNnNBYW4xRjRmTEtPZ0syeWlUCkJ6UW5Od0tCZ1FDWVVZWEtNZm1vSEhGQkJuVjcxY2xQV1BRUC94bmY5SGdnalJJK2dvSDNkRzRxOXdYS2FpaHYKYndIRmNJQXFPYWkzTm1HT1dydzA4bkNENFYyMVZRQWd0ZFdjMFVmS254M2svdFFheHUyM00xaWdkV2JQbXcyUwp4em1TY0sweHVjdkdvQ0VaUWZ0cGlQOVFwUWJIQ0xGNUFIM2tRdExRZDdzOTErZkU2cGtsYlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
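
This kubeconfig embeds client certificates and private keys, so the file itself grants cluster access and should be handled as a secret. A minimal usage sketch, assuming the API server at 10.10.43.240 is reachable:

KUBECONFIG=./kubeconfig kubectl --context saas-mgmt get nodes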


@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  name: awx-test
  namespace: ansible-awx
spec:
  containers:
  - image: nexus2.exem-oss.org/awx-ee:latest
    imagePullPolicy: Always
    name: awx-test
    resources: {}
    securityContext:
      privileged: true
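
A minimal sketch of launching the test pod and opening a shell in it, assuming the manifest is saved as awx-test.yaml (the filename is not shown in this diff) and the ansible-awx namespace exists:

kubectl apply -f awx-test.yaml
kubectl -n ansible-awx exec -it awx-test -- /bin/bash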


@@ -0,0 +1,19 @@
---
- hosts: all
  become: true
  roles:
    - connect-settings
    # - teleport
  vars:
    username: dev2
    adminuser: root
    manual_password: saasadmin1234
    sshmainport: 2222
    #encrypt: 1
    #debug_mode: True
    teleport_uri: teleport.kr.datasaker.io
    # remove: True
    # custom_labels: 'user=havelight,company=exem'
    update: True
    # install: True
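
A minimal sketch of running this play against the all_host inventory above, assuming the playbook is saved as connect-settings.yml (the actual filename is not shown in this diff):

# --check previews the changes; drop it to apply them
ansible-playbook -i all_host connect-settings.yml --check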

19
ansible/01_old/infra-test Normal file

@@ -0,0 +1,19 @@
[redhat]
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
[ubuntu]
#10.10.43.234 ansible_port=22 ansible_user=root
10.10.43.180 ansible_port=22 ansible_user=ubuntu
[tmp]
10.10.43.147 ansible_port=2222 ansible_user=ubuntu
[proxy]
10.10.43.20
[agent:children]
redhat
ubuntu

BIN
ansible/01_old/inventory/.DS_Store vendored Normal file

Binary file not shown.


@@ -0,0 +1,2 @@
[api]
10.10.43.[170:199]


@@ -0,0 +1,14 @@
[dsk_dev]
10.10.43.[1:253]
[bastion]
10.10.43.43 ansible_port=2222
[old_ip]
10.10.30.100
10.10.30.219
10.10.31.33
10.10.31.37
10.10.31.243
10.10.34.128
10.10.34.172


@@ -0,0 +1,2 @@
[api]
10.10.43.[160:169]


@@ -0,0 +1,52 @@
[prod-master]
i-041b16dbf097aa03f ansible_user=ubuntu
i-0446ae551deac0b98 ansible_user=ubuntu
i-0628e1521b484fde1 ansible_user=ubuntu
[prod-node]
i-001143efb27f5c473 ansible_user=ubuntu
i-00608e3310900aff0 ansible_user=ubuntu
i-009e56755c657e557 ansible_user=ubuntu
i-01b968c93a1e29888 ansible_user=ubuntu
i-0317fd83555444cda ansible_user=ubuntu
i-040d9267937974ffc ansible_user=ubuntu
i-041f2d6ebcbb50593 ansible_user=ubuntu
i-046572392b64748b1 ansible_user=ubuntu
i-04712bc4966dadc68 ansible_user=ubuntu
i-04abf3bdccadaf05c ansible_user=ubuntu
i-0524126bf5fdc1483 ansible_user=ubuntu
i-06b7d476688a345e6 ansible_user=ubuntu
i-06c80df6180d036bf ansible_user=ubuntu
i-076a7682ef711c83a ansible_user=ubuntu
i-079a7ec37fe655415 ansible_user=ubuntu
i-08125130e3caf704f ansible_user=ubuntu
i-088361873fe1f1eb2 ansible_user=ubuntu
i-08b15bdf81b808b23 ansible_user=ubuntu
i-0986b098c1961eb8d ansible_user=ubuntu
i-09a8af86b9a3d6474 ansible_user=ubuntu
i-09ecd63d914f62fc3 ansible_user=ubuntu
i-0a0942fb91f9968d8 ansible_user=ubuntu
i-0a768908aade20566 ansible_user=ubuntu
i-0d5eb423a05b70f84 ansible_user=ubuntu
i-0dc48b11bbb330012 ansible_user=ubuntu
i-0dd28df2ff60bf63b ansible_user=ubuntu
i-0ddf860cc0e3c4b92 ansible_user=ubuntu
i-0e4a89bcc7c6421bf ansible_user=ubuntu
i-0e5e379f9b04cd2fa ansible_user=ubuntu
i-0f0b728f94d19d020 ansible_user=ubuntu
i-0f4f0edc7a431de84 ansible_user=ubuntu
i-0f5eef7ed3a20e103 ansible_user=ubuntu
[prod-spot]
i-002c94ea83e2312ea ansible_user=ubuntu
i-05877be601a14cc23 ansible_user=ubuntu
i-0619e290ef4d67954 ansible_user=ubuntu
i-0bd24dc5f5a740dcb ansible_user=ubuntu
i-0e3edc671c6bccfb7 ansible_user=ubuntu
[all:children]
prod-master
prod-node
[all:vars]
ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q ubuntu@bastion.kr.datasaker.io"'
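
The ansible_ssh_common_args setting tunnels every connection through the bastion, which is what lets the EC2 instance IDs above work as hostnames (assuming the bastion's SSH setup resolves instance IDs, e.g. via EC2 Instance Connect). The manual equivalent for a single host:

ssh -o ProxyCommand="ssh -W %h:%p -q ubuntu@bastion.kr.datasaker.io" ubuntu@i-041b16dbf097aa03f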

13
ansible/01_old/inventory/cmoa Executable file

@@ -0,0 +1,13 @@
[cmoa_1]
10.10.43.[200:209]
[cmoa_2]
10.10.43.[210:219]
[cmoa_etc]
10.10.43.[220:229]
[cmoa:children]
cmoa_1
cmoa_2
cmoa_etc


@@ -0,0 +1,5 @@
[dsk_dev]
10.10.43.[100:159]
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=dev2-iac

7
ansible/01_old/inventory/etc Executable file

@@ -0,0 +1,7 @@
[etc]
10.10.43.[1:10]
10.10.43.[14:30]
10.10.43.[44:99]
[xcp-ng]
10.10.43.[31:42]


@@ -0,0 +1,2 @@
[infra]
10.10.43.[230:249]


@@ -0,0 +1,76 @@
[all]
10.10.43.100 ansible_port=2222 ansible_user=dev2
10.10.43.101 ansible_port=2222 ansible_user=dev2
10.10.43.105 ansible_port=2222 ansible_user=dev2
10.10.43.106 ansible_port=2222 ansible_user=dev2
10.10.43.111 ansible_port=2222 ansible_user=dev2
10.10.43.112 ansible_port=2222 ansible_user=dev2
10.10.43.113 ansible_port=2222 ansible_user=dev2
10.10.43.114 ansible_port=2222 ansible_user=dev2
10.10.43.115 ansible_port=2222 ansible_user=dev2
10.10.43.116 ansible_port=2222 ansible_user=dev2
10.10.43.117 ansible_port=2222 ansible_user=dev2
10.10.43.118 ansible_port=2222 ansible_user=dev2
10.10.43.119 ansible_port=2222 ansible_user=dev2
10.10.43.120 ansible_port=2222 ansible_user=dev2
10.10.43.121 ansible_port=2222 ansible_user=dev2
10.10.43.122 ansible_port=2222 ansible_user=dev2
10.10.43.123 ansible_port=2222 ansible_user=dev2
10.10.43.124 ansible_port=2222 ansible_user=dev2
10.10.43.125 ansible_port=2222 ansible_user=dev2
10.10.43.126 ansible_port=2222 ansible_user=dev2
10.10.43.127 ansible_port=2222 ansible_user=dev2
10.10.43.128 ansible_port=2222 ansible_user=dev2
10.10.43.129 ansible_port=2222 ansible_user=dev2
10.10.43.130 ansible_port=2222 ansible_user=dev2
10.10.43.131 ansible_port=2222 ansible_user=dev2
10.10.43.132 ansible_port=2222 ansible_user=dev2
10.10.43.133 ansible_port=2222 ansible_user=dev2
10.10.43.134 ansible_port=2222 ansible_user=dev2
10.10.43.135 ansible_port=2222 ansible_user=dev2
10.10.43.136 ansible_port=2222 ansible_user=dev2
10.10.43.137 ansible_port=2222 ansible_user=dev2
10.10.43.138 ansible_port=2222 ansible_user=dev2
10.10.43.139 ansible_port=2222 ansible_user=dev2
10.10.43.140 ansible_port=2222 ansible_user=dev2
10.10.43.141 ansible_port=2222 ansible_user=dev2
10.10.43.142 ansible_port=2222 ansible_user=dev2
10.10.43.143 ansible_port=2222 ansible_user=dev2
10.10.43.144 ansible_port=2222 ansible_user=dev2
10.10.43.145 ansible_port=2222 ansible_user=dev2
10.10.43.146 ansible_port=2222 ansible_user=dev2
10.10.43.147 ansible_port=2222 ansible_user=dev2
10.10.43.148 ansible_port=2222 ansible_user=dev2
10.10.43.151 ansible_port=2222 ansible_user=dev2
10.10.43.152 ansible_port=2222 ansible_user=dev2
10.10.43.153 ansible_port=2222 ansible_user=dev2
10.10.43.164 ansible_port=2222 ansible_user=dev2
10.10.43.165 ansible_port=2222 ansible_user=dev2
10.10.43.166 ansible_port=2222 ansible_user=dev2
10.10.43.167 ansible_port=2222 ansible_user=dev2
10.10.43.168 ansible_port=2222 ansible_user=dev2
10.10.43.169 ansible_port=2222 ansible_user=dev2
10.10.43.171 ansible_port=2222 ansible_user=dev2
10.10.43.172 ansible_port=2222 ansible_user=dev2
10.10.43.173 ansible_port=2222 ansible_user=dev2
10.10.43.174 ansible_port=2222 ansible_user=dev2
10.10.43.175 ansible_port=2222 ansible_user=dev2
10.10.43.176 ansible_port=2222 ansible_user=dev2
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
10.10.43.180 ansible_port=2222 ansible_user=dev2
10.10.43.181 ansible_port=2222 ansible_user=dev2
10.10.43.182 ansible_port=2222 ansible_user=dev2
10.10.43.185 ansible_port=2222 ansible_user=dev2
10.10.43.186 ansible_port=2222 ansible_user=dev2
10.10.43.187 ansible_port=2222 ansible_user=dev2
10.10.43.188 ansible_port=2222 ansible_user=dev2
10.10.43.189 ansible_port=2222 ansible_user=dev2
10.10.43.190 ansible_port=2222 ansible_user=dev2
10.10.43.191 ansible_port=2222 ansible_user=dev2
10.10.43.192 ansible_port=2222 ansible_user=dev2
10.10.43.193 ansible_port=2222 ansible_user=dev2
10.10.43.194 ansible_port=2222 ansible_user=dev2
10.10.43.199 ansible_port=2222 ansible_user=dev2


@@ -0,0 +1,65 @@
[prod-demo-master]
10.10.43.100 ansible_port=2222 ansible_user=dev2
[prod-demo-worker]
10.10.43.101 ansible_port=2222 ansible_user=dev2
[dev-demo-master]
10.10.43.105 ansible_port=2222 ansible_user=dev2
[dev-demo-worker]
10.10.43.106 ansible_port=2222 ansible_user=dev2
[saas_mgmt_master]
10.10.43.240 ansible_port=2222 ansible_user=dev2
[saas_mgmt_node]
10.10.43.[241:243] ansible_port=2222 ansible_user=dev2
[dsk_dev_master]
10.10.43.[111:113] ansible_port=2222 ansible_user=dev2
[dsk_dev_node]
10.10.43.[114:153] ansible_port=2222 ansible_user=dev2
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=havelight
[agent_host]
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
10.10.43.180 ansible_port=2222 ansible_user=dev2
10.10.43.181 ansible_port=2222 ansible_user=dev2
10.10.43.182 ansible_port=2222 ansible_user=dev2
[agent_cri_master]
10.10.43.185 ansible_port=2222 ansible_user=dev2
[agent_cri_worker]
10.10.43.186 ansible_port=2222 ansible_user=dev2
10.10.43.187 ansible_port=2222 ansible_user=dev2
10.10.43.188 ansible_port=2222 ansible_user=dev2
[agent_middleware_master]
10.10.43.189 ansible_port=2222 ansible_user=dev2
[agent_middleware_worker]
10.10.43.190 ansible_port=2222 ansible_user=dev2
10.10.43.191 ansible_port=2222 ansible_user=dev2
10.10.43.192 ansible_port=2222 ansible_user=dev2
10.10.43.193 ansible_port=2222 ansible_user=dev2
10.10.43.194 ansible_port=2222 ansible_user=dev2
10.10.43.199 ansible_port=2222 ansible_user=dev2
[all:children]
saas_mgmt_master
saas_mgmt_node
dsk_dev_master
dsk_dev_node
bastion
agent_host
agent_cri_master
agent_cri_worker
agent_middleware_master
agent_middleware_worker


@@ -0,0 +1,41 @@
iptables -A INPUT -s 10.10.45.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.47.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.48.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.50.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.37.0/24 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.45.0/24 -j DROP
iptables -A INPUT -s 10.10.47.0/24 -j DROP
iptables -A INPUT -s 10.10.48.0/24 -j DROP
iptables -A INPUT -s 10.10.50.0/24 -j DROP
iptables -A INPUT -s 10.10.37.0/24 -j DROP
iptables -A INPUT -s 10.10.43.200 -j LOG --log-prefix "Dropped: "
iptables -A INPUT -s 10.10.43.200 -j DROP
- { source: "", target: "DROP" }
- { source: "10.10.45.0/24", target: "DROP" }
- { source: "10.10.47.0/24", target: "DROP" }
- { source: "10.10.48.0/24", target: "DROP" }
- { source: "10.10.50.0/24", target: "DROP" }
- { source: "10.10.37.0/24", target: "DROP" }
- { source: "10.10.45.196", target: "DROP" }
- { source: "10.10.47.20", target: "DROP" }
- { source: "10.10.47.231", target: "DROP" }
- { source: "10.10.47.233", target: "DROP" }
- { source: "10.10.48.252", target: "DROP" }
- { source: "10.10.50.232", target: "DROP" }
- { source: "10.10.50.233", target: "DROP" }
10.10.45.174
10.10.45.196
10.10.47.20
10.10.47.231
10.10.47.233
10.10.48.252
10.10.50.232
10.10.50.233
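
This file mixes raw iptables commands, what look like role variables (the source/target entries), and a bare IP list; it is kept as found. A shell-loop sketch of the same log-then-drop pattern applied per subnet:

for src in 10.10.45.0/24 10.10.47.0/24 10.10.48.0/24 10.10.50.0/24 10.10.37.0/24; do
  iptables -A INPUT -s "$src" -j LOG --log-prefix "Dropped: "
  iptables -A INPUT -s "$src" -j DROP
done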


@@ -0,0 +1,41 @@
[test-ubuntu]
10.10.43.[164:166] ansible_port=2222 ansible_user=dev2-iac
[test-centos]
10.10.43.[167:169] ansible_port=2222 ansible_user=dev2-iac
[test-openshift]
10.10.43.171 ansible_port=2222 ansible_user=dev2-iac
[test-opensuse]
10.10.43.172 ansible_port=2222 ansible_user=dev2-iac
[test-debian]
10.10.43.173 ansible_port=2222 ansible_user=dev2-iac
[test-oracle]
10.10.43.174 ansible_port=2222 ansible_user=dev2-iac
[test-amazon]
10.10.43.175 ansible_port=2222 ansible_user=dev2-iac
[test-db_env_cluster]
10.10.43.176 ansible_port=2222 ansible_user=dev2-iac
[test-redhat]
10.10.43.[177:179] ansible_port=2222 ansible_user=dev2-iac
[test-docker]
10.10.43.180 ansible_port=2222 ansible_user=dev2-iac
[all:children]
test-ubuntu
test-centos
test-openshift
test-opensuse
test-debian
test-oracle
test-amazon
test-db_env_cluster
test-redhat
test-docker


@@ -0,0 +1,76 @@
[server]
10.10.43.100 ansible_port=2222 ansible_user=dev2
10.10.43.101 ansible_port=2222 ansible_user=dev2
10.10.43.105 ansible_port=2222 ansible_user=dev2
10.10.43.106 ansible_port=2222 ansible_user=dev2
10.10.43.111 ansible_port=2222 ansible_user=dev2
10.10.43.112 ansible_port=2222 ansible_user=dev2
10.10.43.113 ansible_port=2222 ansible_user=dev2
10.10.43.114 ansible_port=2222 ansible_user=dev2
10.10.43.115 ansible_port=2222 ansible_user=dev2
10.10.43.116 ansible_port=2222 ansible_user=dev2
10.10.43.117 ansible_port=2222 ansible_user=dev2
10.10.43.118 ansible_port=2222 ansible_user=dev2
10.10.43.119 ansible_port=2222 ansible_user=dev2
10.10.43.120 ansible_port=2222 ansible_user=dev2
10.10.43.121 ansible_port=2222 ansible_user=dev2
10.10.43.122 ansible_port=2222 ansible_user=dev2
10.10.43.123 ansible_port=2222 ansible_user=dev2
10.10.43.124 ansible_port=2222 ansible_user=dev2
10.10.43.125 ansible_port=2222 ansible_user=dev2
10.10.43.126 ansible_port=2222 ansible_user=dev2
10.10.43.127 ansible_port=2222 ansible_user=dev2
10.10.43.128 ansible_port=2222 ansible_user=dev2
10.10.43.129 ansible_port=2222 ansible_user=dev2
10.10.43.130 ansible_port=2222 ansible_user=dev2
10.10.43.131 ansible_port=2222 ansible_user=dev2
10.10.43.132 ansible_port=2222 ansible_user=dev2
10.10.43.133 ansible_port=2222 ansible_user=dev2
10.10.43.134 ansible_port=2222 ansible_user=dev2
10.10.43.135 ansible_port=2222 ansible_user=dev2
10.10.43.136 ansible_port=2222 ansible_user=dev2
10.10.43.137 ansible_port=2222 ansible_user=dev2
10.10.43.138 ansible_port=2222 ansible_user=dev2
10.10.43.139 ansible_port=2222 ansible_user=dev2
10.10.43.140 ansible_port=2222 ansible_user=dev2
10.10.43.141 ansible_port=2222 ansible_user=dev2
10.10.43.142 ansible_port=2222 ansible_user=dev2
10.10.43.143 ansible_port=2222 ansible_user=dev2
10.10.43.144 ansible_port=2222 ansible_user=dev2
10.10.43.145 ansible_port=2222 ansible_user=dev2
10.10.43.146 ansible_port=2222 ansible_user=dev2
10.10.43.147 ansible_port=2222 ansible_user=dev2
10.10.43.148 ansible_port=2222 ansible_user=dev2
10.10.43.151 ansible_port=2222 ansible_user=dev2
10.10.43.152 ansible_port=2222 ansible_user=dev2
10.10.43.153 ansible_port=2222 ansible_user=dev2
10.10.43.164 ansible_port=2222 ansible_user=dev2
10.10.43.165 ansible_port=2222 ansible_user=dev2
10.10.43.166 ansible_port=2222 ansible_user=dev2
10.10.43.167 ansible_port=2222 ansible_user=dev2
10.10.43.168 ansible_port=2222 ansible_user=dev2
10.10.43.169 ansible_port=2222 ansible_user=dev2
10.10.43.171 ansible_port=2222 ansible_user=dev2
10.10.43.174 ansible_port=2222 ansible_user=dev2
10.10.43.176 ansible_port=2222 ansible_user=dev2
10.10.43.177 ansible_port=2222 ansible_user=dev2
10.10.43.178 ansible_port=2222 ansible_user=dev2
10.10.43.179 ansible_port=2222 ansible_user=dev2
10.10.43.180 ansible_port=2222 ansible_user=dev2
10.10.43.181 ansible_port=2222 ansible_user=dev2
10.10.43.182 ansible_port=2222 ansible_user=dev2
10.10.43.185 ansible_port=2222 ansible_user=dev2
10.10.43.186 ansible_port=2222 ansible_user=dev2
10.10.43.187 ansible_port=2222 ansible_user=dev2
10.10.43.188 ansible_port=2222 ansible_user=dev2
10.10.43.189 ansible_port=2222 ansible_user=dev2
10.10.43.190 ansible_port=2222 ansible_user=dev2
10.10.43.191 ansible_port=2222 ansible_user=dev2
10.10.43.192 ansible_port=2222 ansible_user=dev2
10.10.43.193 ansible_port=2222 ansible_user=dev2
10.10.43.194 ansible_port=2222 ansible_user=dev2
10.10.43.199 ansible_port=2222 ansible_user=dev2
[all:children]
server

3
ansible/01_old/inventory2 Executable file

@@ -0,0 +1,3 @@
[servers]
10.10.43.100

33
ansible/01_old/inventory_agent Executable file

@@ -0,0 +1,33 @@
[agent_host]
10.10.43.177
10.10.43.178
10.10.43.179
10.10.43.180 ansible_user=dev2
10.10.43.182 ansible_user=dev2
[agent_cri_master]
10.10.43.185
[agent_cri_worker]
10.10.43.186
10.10.43.187
10.10.43.188
[agent_middleware_master]
10.10.43.189
[agent_middleware_worker]
10.10.43.190
10.10.43.191
10.10.43.192
10.10.43.193
10.10.43.194
10.10.43.199
[all:children]
agent_host
agent_cri_master
agent_cri_worker
agent_middleware_master
agent_middleware_worker

32
ansible/01_old/inventory_api Executable file

@@ -0,0 +1,32 @@
[master]
10.10.43.160
[worker1]
10.10.43.161
[worker2]
10.10.43.162
[worker3]
10.10.43.163
[cluster:children]
master
worker1
worker2
worker3
[master:vars]
kubernetes_role="master"
runtime="containerd"
[worker1:vars]
kubernetes_role="node"
runtime="containerd"
[worker2:vars]
kubernetes_role="node"
runtime="containerd"
[worker3:vars]
kubernetes_role="node"
runtime="containerd"
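
The per-group vars hand each node its role for whatever kubernetes play consumes this inventory. A quick way to confirm what a host will see, assuming the file is used as inventory_api:

ansible master -i inventory_api -m debug -a "var=kubernetes_role"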

22
ansible/01_old/inventory_bak Executable file

@@ -0,0 +1,22 @@
[release]
10.10.43.100
10.10.43.101
[amazon]
15.165.19.5 ansible_user=ec2-user
[ubuntu]
13.125.123.49 ansible_user=ubuntu
[aws:children]
amazon
ubuntu
[monitoring]
3.38.1.96
[bastion]
10.10.43.43 ansible_port=2222 ansible_user=havelight
[hong]
10.10.52.55

18
ansible/01_old/inventory_cent Executable file

@@ -0,0 +1,18 @@
[master]
10.10.43.244
[worker1]
10.10.43.245
[cluster:children]
master
worker1
[master:vars]
kubernetes_role="master"
runtime="containerd"
[worker1:vars]
kubernetes_role="node"
runtime="containerd"


@@ -0,0 +1,35 @@
[host]
10.10.43.111
10.10.43.112
10.10.43.113
10.10.43.114
10.10.43.115
10.10.43.116
10.10.43.117
10.10.43.118
10.10.43.119
10.10.43.120
10.10.43.121
10.10.43.122
10.10.43.123
10.10.43.124
10.10.43.125
10.10.43.126
10.10.43.127
10.10.43.128
10.10.43.129
10.10.43.130
10.10.43.131
10.10.43.132
10.10.43.133
10.10.43.134
10.10.43.135
10.10.43.136
10.10.43.137
10.10.43.138
10.10.43.139
10.10.43.140
10.10.43.141
[test]
10.10.43.142


@@ -0,0 +1,75 @@
[local]
127.0.0.1
[nas]
10.10.43.42 ansible_user=exemdev2 ansible_port=2222
[aws]
3.39.22.205 ansible_user=ec2-user
[centos]
10.10.43.231 ansible_user=centos
10.10.43.232 ansible_user=centos
10.10.43.233 ansible_user=centos
[redhat]
10.10.43.177 ansible_user=redhat
10.10.43.178 ansible_user=redhat
10.10.43.179 ansible_user=redhat
[ubuntu]
10.10.43.97 ansible_user=ubuntu
10.10.43.234 ansible_user=ubuntu
[agent:children]
centos
redhat
ubuntu
[test]
10.10.43.111 ansible_user=dev2 ansible_port=2222
10.10.43.112 ansible_user=dev2 ansible_port=2222
10.10.43.113 ansible_user=dev2 ansible_port=2222
10.10.43.114 ansible_user=dev2 ansible_port=2222
10.10.43.115 ansible_user=dev2 ansible_port=2222
10.10.43.116 ansible_user=dev2 ansible_port=2222
10.10.43.117 ansible_user=dev2 ansible_port=2222
10.10.43.118 ansible_user=dev2 ansible_port=2222
10.10.43.119 ansible_user=dev2 ansible_port=2222
10.10.43.120 ansible_user=dev2 ansible_port=2222
10.10.43.121 ansible_user=dev2 ansible_port=2222
10.10.43.122 ansible_user=dev2 ansible_port=2222
10.10.43.123 ansible_user=dev2 ansible_port=2222
10.10.43.124 ansible_user=dev2 ansible_port=2222
10.10.43.125 ansible_user=dev2 ansible_port=2222
10.10.43.126 ansible_user=dev2 ansible_port=2222
10.10.43.127 ansible_user=dev2 ansible_port=2222
10.10.43.128 ansible_user=dev2 ansible_port=2222
10.10.43.129 ansible_user=dev2 ansible_port=2222
10.10.43.130 ansible_user=dev2 ansible_port=2222
10.10.43.131 ansible_user=dev2 ansible_port=2222
10.10.43.132 ansible_user=dev2 ansible_port=2222
10.10.43.133 ansible_user=dev2 ansible_port=2222
10.10.43.134 ansible_user=dev2 ansible_port=2222
10.10.43.135 ansible_user=dev2 ansible_port=2222
10.10.43.136 ansible_user=dev2 ansible_port=2222
10.10.43.137 ansible_user=dev2 ansible_port=2222
10.10.43.138 ansible_user=dev2 ansible_port=2222
10.10.43.139 ansible_user=dev2 ansible_port=2222
10.10.43.140 ansible_user=dev2 ansible_port=2222
10.10.43.141 ansible_user=dev2 ansible_port=2222
10.10.43.142 ansible_user=dev2 ansible_port=2222
10.10.43.143 ansible_user=dev2 ansible_port=2222
10.10.43.144 ansible_user=dev2 ansible_port=2222
10.10.43.145 ansible_user=dev2 ansible_port=2222
10.10.43.146 ansible_user=dev2 ansible_port=2222
10.10.43.148 ansible_user=dev2 ansible_port=2222
10.10.43.151 ansible_user=dev2 ansible_port=2222
10.10.43.152 ansible_user=dev2 ansible_port=2222
10.10.43.153 ansible_user=dev2 ansible_port=2222
10.10.43.240 ansible_user=dev2 ansible_port=2222
10.10.43.241 ansible_user=dev2 ansible_port=2222
10.10.43.242 ansible_user=dev2 ansible_port=2222
10.10.43.243 ansible_user=dev2 ansible_port=2222
10.10.43.43 ansible_user=dev2 ansible_port=2222
10.10.43.98 ansible_user=dev2 ansible_port=2222


@@ -0,0 +1,29 @@
[all]
tmp_k8s_master_1 ansible_host=10.10.43.51 ip=10.10.43.51 ansible_user=dev2 ansible_port=2222
tmp_k8s_master_2 ansible_host=10.10.43.52 ip=10.10.43.52 ansible_user=dev2 ansible_port=2222
tmp_k8s_master_3 ansible_host=10.10.43.53 ip=10.10.43.53 ansible_user=dev2 ansible_port=2222
tmp_k8s_worker_1 ansible_host=10.10.43.54 ip=10.10.43.54 ansible_user=dev2 ansible_port=2222
tmp_k8s_worker_2 ansible_host=10.10.43.55 ip=10.10.43.55 ansible_user=dev2 ansible_port=2222
tmp_k8s_worker_3 ansible_host=10.10.43.56 ip=10.10.43.56 ansible_user=dev2 ansible_port=2222
[kube_control_plane]
tmp_k8s_master_1
tmp_k8s_master_2
tmp_k8s_master_3
[etcd]
tmp_k8s_master_1
tmp_k8s_master_2
tmp_k8s_master_3
[kube_node]
tmp_k8s_worker_1
tmp_k8s_worker_2
tmp_k8s_worker_3
[calico_rr]
[k8s_cluster:children]
kube_control_plane
kube_node
calico_rr


@@ -0,0 +1,11 @@
---
- hosts: all
  become: true
  roles:
    - password_change
  vars:
    username: dev2-iac
    adminuser: root
    #manual_password: saasadmin1234
    sshmainport: 2222
    debug_mode: True


@@ -0,0 +1,11 @@
---
- hosts: all
  become: true
  roles:
    - password_change
  vars:
    username: dev2
    adminuser: root
    #manual_password: saasadmin1234
    sshmainport: 2222
    debug_mode: True
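
The two password_change plays differ only in the target username (dev2-iac vs dev2). A minimal invocation sketch, assuming this play is saved as password_change.yml (the filename is not shown in this diff):

ansible-playbook -i inventory password_change.yml --ask-become-pass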


@@ -0,0 +1,10 @@
---
- name: "restart"
  hosts: all
  become: yes
  tasks:
    - name: Restart teleport service
      ansible.builtin.systemd:
        name: teleport
        enabled: true
        state: restarted
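
The same restart can be done ad hoc without a playbook; this one-liner is equivalent to the task above:

ansible all -b -m ansible.builtin.systemd -a "name=teleport state=restarted enabled=true"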

BIN
ansible/01_old/roles/.DS_Store vendored Normal file

Binary file not shown.


@@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
    - hosts: servers
      roles:
         - { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).


@@ -0,0 +1,140 @@
helm_checksum: sha256:72f1c0fcfb17b41b89087e9232e50f20c606e44a0edc2bb9737e05d1c75b8c4f
helm_version: v3.10.2
kubernetes_version: 1.23.16
kubernetes_kubelet_extra_args: ""
kubernetes_kubeadm_init_extra_opts: ""
kubernetes_join_command_extra_opts: ""
kubernetes_pod_network:
  cni: 'calico'
  cidr: '10.96.0.0/12'
kubernetes_calico_manifest_file: https://docs.projectcalico.org/manifests/calico.yaml
kubernetes_metric_server_file: https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
containerd_config:
  version: 2
  root: /var/lib/containerd
  state: /run/containerd
  plugin_dir: ""
  disabled_plugins: []
  required_plugins: []
  oom_score: 0
  grpc:
    address: /run/containerd/containerd.sock
    tcp_address: ""
    tcp_tls_cert: ""
    tcp_tls_key: ""
    uid: 0
    gid: 0
    max_recv_message_size: 16777216
    max_send_message_size: 16777216
  ttrpc:
    address: ""
    uid: 0
    gid: 0
  debug:
    address: ""
    uid: 0
    gid: 0
    level: ""
  metrics:
    address: ""
    grpc_histogram: false
  cgroup:
    path: ""
  timeouts:
    "io.containerd.timeout.shim.cleanup": 5s
    "io.containerd.timeout.shim.load": 5s
    "io.containerd.timeout.shim.shutdown": 3s
    "io.containerd.timeout.task.state": 2s
  plugins:
    "io.containerd.gc.v1.scheduler":
      pause_threshold: 0.02
      deletion_threshold: 0
      mutation_threshold: 100
      schedule_delay: 0s
      startup_delay: 100ms
    "io.containerd.grpc.v1.cri":
      disable_tcp_service: true
      stream_server_address: 127.0.0.1
      stream_server_port: "0"
      stream_idle_timeout: 4h0m0s
      enable_selinux: false
      sandbox_image: k8s.gcr.io/pause:3.1
      stats_collect_period: 10
      systemd_cgroup: false
      enable_tls_streaming: false
      max_container_log_line_size: 16384
      disable_cgroup: false
      disable_apparmor: false
      restrict_oom_score_adj: false
      max_concurrent_downloads: 3
      disable_proc_mount: false
      containerd:
        snapshotter: overlayfs
        default_runtime_name: runc
        no_pivot: false
        default_runtime:
          runtime_type: ""
          runtime_engine: ""
          runtime_root: ""
          privileged_without_host_devices: false
        untrusted_workload_runtime:
          runtime_type: ""
          runtime_engine: ""
          runtime_root: ""
          privileged_without_host_devices: false
        runtimes:
          runc:
            runtime_type: io.containerd.runc.v1
            runtime_engine: ""
            runtime_root: ""
            privileged_without_host_devices: false
      cni:
        bin_dir: /opt/cni/bin
        conf_dir: /etc/cni/net.d
        max_conf_num: 1
        conf_template: ""
      registry:
        configs:
          "10.10.31.243:5000":
            tls:
              insecure_skip_verify: true
        mirrors:
          "docker.io":
            endpoint:
              - https://registry-1.docker.io
          "10.10.31.243:5000":
            endpoint:
              - http://10.10.31.243:5000
      x509_key_pair_streaming:
        tls_cert_file: ""
        tls_key_file: ""
    "io.containerd.internal.v1.opt":
      path: /opt/containerd
    "io.containerd.internal.v1.restart":
      interval: 10s
    "io.containerd.metadata.v1.bolt":
      content_sharing_policy: shared
    "io.containerd.monitor.v1.cgroups":
      no_prometheus: false
    "io.containerd.runtime.v1.linux":
      shim: containerd-shim
      runtime: runc
      runtime_root: ""
      no_shim: false
      shim_debug: false
    "io.containerd.runtime.v2.task":
      platforms:
        - linux/amd64
    "io.containerd.service.v1.diff-service":
      default:
        - walking
    "io.containerd.snapshotter.v1.devmapper":
      root_path: ""
      pool_name: ""
      base_image_size: ""
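
containerd_config mirrors containerd's config.toml (version 2) key for key, so the role presumably renders it through a template. After a run, the live settings can be checked against these defaults (a sketch, assuming containerd 1.4+ on the target):

# print the effective containerd config and look for the private registry mirror
containerd config dump | grep -A 3 '10.10.31.243:5000'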


@@ -0,0 +1,645 @@
#!/bin/sh
set -e
# Docker CE for Linux installation script
#
# See https://docs.docker.com/engine/install/ for the installation steps.
#
# This script is meant for quick & easy install via:
# $ curl -fsSL https://get.docker.com -o get-docker.sh
# $ sh get-docker.sh
#
# For test builds (ie. release candidates):
# $ curl -fsSL https://test.docker.com -o test-docker.sh
# $ sh test-docker.sh
#
# NOTE: Make sure to verify the contents of the script
# you downloaded matches the contents of install.sh
# located at https://github.com/docker/docker-install
# before executing.
#
# Git commit from https://github.com/docker/docker-install when
# the script was uploaded (Should only be modified by upload job):
SCRIPT_COMMIT_SHA="66474034547a96caa0a25be56051ff8b726a1b28"
# strip "v" prefix if present
VERSION="${VERSION#v}"
# The channel to install from:
# * nightly
# * test
# * stable
# * edge (deprecated)
DEFAULT_CHANNEL_VALUE="stable"
if [ -z "$CHANNEL" ]; then
CHANNEL=$DEFAULT_CHANNEL_VALUE
fi
DEFAULT_DOWNLOAD_URL="https://download.docker.com"
if [ -z "$DOWNLOAD_URL" ]; then
DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
fi
DEFAULT_REPO_FILE="docker-ce.repo"
if [ -z "$REPO_FILE" ]; then
REPO_FILE="$DEFAULT_REPO_FILE"
fi
mirror=''
DRY_RUN=${DRY_RUN:-}
while [ $# -gt 0 ]; do
case "$1" in
--mirror)
mirror="$2"
shift
;;
--dry-run)
DRY_RUN=1
;;
--*)
echo "Illegal option $1"
;;
esac
shift $(( $# > 0 ? 1 : 0 ))
done
case "$mirror" in
Aliyun)
DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
;;
AzureChinaCloud)
DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
;;
esac
command_exists() {
command -v "$@" > /dev/null 2>&1
}
# version_gte checks if the version specified in $VERSION is at least
# the given CalVer (YY.MM) version. returns 0 (success) if $VERSION is either
# unset (=latest) or newer or equal than the specified version. Returns 1 (fail)
# otherwise.
#
# examples:
#
# VERSION=20.10
# version_gte 20.10 // 0 (success)
# version_gte 19.03 // 0 (success)
# version_gte 21.10 // 1 (fail)
version_gte() {
if [ -z "$VERSION" ]; then
return 0
fi
eval calver_compare "$VERSION" "$1"
}
# calver_compare compares two CalVer (YY.MM) version strings. returns 0 (success)
# if version A is newer or equal than version B, or 1 (fail) otherwise. Patch
# releases and pre-release (-alpha/-beta) are not taken into account
#
# examples:
#
# calver_compare 20.10 19.03 // 0 (success)
# calver_compare 20.10 20.10 // 0 (success)
# calver_compare 19.03 20.10 // 1 (fail)
calver_compare() (
set +x
yy_a="$(echo "$1" | cut -d'.' -f1)"
yy_b="$(echo "$2" | cut -d'.' -f1)"
if [ "$yy_a" -lt "$yy_b" ]; then
return 1
fi
if [ "$yy_a" -gt "$yy_b" ]; then
return 0
fi
mm_a="$(echo "$1" | cut -d'.' -f2)"
mm_b="$(echo "$2" | cut -d'.' -f2)"
if [ "${mm_a#0}" -lt "${mm_b#0}" ]; then
return 1
fi
return 0
)
is_dry_run() {
if [ -z "$DRY_RUN" ]; then
return 1
else
return 0
fi
}
is_wsl() {
case "$(uname -r)" in
*microsoft* ) true ;; # WSL 2
*Microsoft* ) true ;; # WSL 1
* ) false;;
esac
}
is_darwin() {
case "$(uname -s)" in
*darwin* ) true ;;
*Darwin* ) true ;;
* ) false;;
esac
}
deprecation_notice() {
distro=$1
distro_version=$2
echo
printf "\033[91;1mDEPRECATION WARNING\033[0m\n"
printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version"
echo " No updates or security fixes will be released for this distribution, and users are recommended"
echo " to upgrade to a currently maintained version of $distro."
echo
printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue."
echo
sleep 10
}
get_distribution() {
lsb_dist=""
# Every system that we officially support has /etc/os-release
if [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
# Returning an empty string here should be alright since the
# case statements don't act unless you provide an actual value
echo "$lsb_dist"
}
echo_docker_as_nonroot() {
if is_dry_run; then
return
fi
if command_exists docker && [ -e /var/run/docker.sock ]; then
(
set -x
$sh_c 'docker version'
) || true
fi
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
echo
echo "================================================================================"
echo
if version_gte "20.10"; then
echo "To run Docker as a non-privileged user, consider setting up the"
echo "Docker daemon in rootless mode for your user:"
echo
echo " dockerd-rootless-setuptool.sh install"
echo
echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
echo
fi
echo
echo "To run the Docker daemon as a fully privileged service, but granting non-root"
echo "users access, refer to https://docs.docker.com/go/daemon-access/"
echo
echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
echo " to root access on the host. Refer to the 'Docker daemon attack surface'"
echo " documentation for details: https://docs.docker.com/go/attack-surface/"
echo
echo "================================================================================"
echo
}
# Check if this is a forked Linux distro
check_forked() {
# Check for lsb_release command existence, it usually exists in forked distros
if command_exists lsb_release; then
# Check if the `-u` option is supported
set +e
lsb_release -a -u > /dev/null 2>&1
lsb_release_exit_code=$?
set -e
# Check if the command has exited successfully, it means we're in a forked distro
if [ "$lsb_release_exit_code" = "0" ]; then
# Print info about current distro
cat <<-EOF
You're using '$lsb_dist' version '$dist_version'.
EOF
# Get the upstream release info
lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
# Print info about upstream distro
cat <<-EOF
Upstream release is '$lsb_dist' version '$dist_version'.
EOF
else
if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
if [ "$lsb_dist" = "osmc" ]; then
# OSMC runs Raspbian
lsb_dist=raspbian
else
# We're Debian and don't even know it!
lsb_dist=debian
fi
dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
case "$dist_version" in
11)
dist_version="bullseye"
;;
10)
dist_version="buster"
;;
9)
dist_version="stretch"
;;
8)
dist_version="jessie"
;;
esac
fi
fi
fi
}
do_install() {
echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"
if command_exists docker; then
cat >&2 <<-'EOF'
Warning: the "docker" command appears to already exist on this system.
If you already have Docker installed, this script can cause trouble, which is
why we're displaying this warning and provide the opportunity to cancel the
installation.
If you installed the current Docker package using this script and are using it
again to update Docker, you can safely ignore this message.
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
user="$(id -un 2>/dev/null || true)"
sh_c='sh -c'
if [ "$user" != 'root' ]; then
if command_exists sudo; then
sh_c='sudo -E sh -c'
elif command_exists su; then
sh_c='su -c'
else
cat >&2 <<-'EOF'
Error: this installer needs the ability to run commands as root.
We are unable to find either "sudo" or "su" available to make this happen.
EOF
exit 1
fi
fi
if is_dry_run; then
sh_c="echo"
fi
# perform some very rudimentary platform detection
lsb_dist=$( get_distribution )
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
if is_wsl; then
echo
echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
echo
cat >&2 <<-'EOF'
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
case "$lsb_dist" in
ubuntu)
if command_exists lsb_release; then
dist_version="$(lsb_release --codename | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
fi
;;
debian|raspbian)
dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
case "$dist_version" in
11)
dist_version="bullseye"
;;
10)
dist_version="buster"
;;
9)
dist_version="stretch"
;;
8)
dist_version="jessie"
;;
esac
;;
centos|rhel|sles)
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
*)
if command_exists lsb_release; then
dist_version="$(lsb_release --release | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
esac
# Check if this is a forked Linux distro
check_forked
# Print deprecation warnings for distro versions that recently reached EOL,
# but may still be commonly used (especially LTS versions).
case "$lsb_dist.$dist_version" in
debian.stretch|debian.jessie)
deprecation_notice "$lsb_dist" "$dist_version"
;;
raspbian.stretch|raspbian.jessie)
deprecation_notice "$lsb_dist" "$dist_version"
;;
ubuntu.xenial|ubuntu.trusty)
deprecation_notice "$lsb_dist" "$dist_version"
;;
fedora.*)
if [ "$dist_version" -lt 33 ]; then
deprecation_notice "$lsb_dist" "$dist_version"
fi
;;
esac
# Run setup for each distro accordingly
case "$lsb_dist" in
ubuntu|debian|raspbian)
pre_reqs="apt-transport-https ca-certificates curl"
if ! command -v gpg > /dev/null; then
pre_reqs="$pre_reqs gnupg"
fi
apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
(
if ! is_dry_run; then
set -x
fi
$sh_c 'apt-get update -qq >/dev/null'
$sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null"
$sh_c 'mkdir -p /etc/apt/keyrings && chmod -R 0755 /etc/apt/keyrings'
$sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg"
$sh_c "chmod a+r /etc/apt/keyrings/docker.gpg"
$sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
$sh_c 'apt-get update -qq >/dev/null'
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
# Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g")"
search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
echo
exit 1
fi
if version_gte "18.09"; then
search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
echo "INFO: $search_command"
cli_pkg_version="=$($sh_c "$search_command")"
fi
pkg_version="=$pkg_version"
fi
fi
(
pkgs="docker-ce${pkg_version%=}"
if version_gte "18.09"; then
# older versions didn't ship the cli and containerd as separate packages
pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io"
fi
if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
# also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
pkgs="$pkgs docker-scan-plugin"
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pkgs >/dev/null"
)
echo_docker_as_nonroot
exit 0
;;
centos|fedora|rhel)
if [ "$(uname -m)" != "s390x" ] && [ "$lsb_dist" = "rhel" ]; then
echo "Packages for RHEL are currently only available for s390x."
exit 1
fi
if [ "$lsb_dist" = "fedora" ]; then
pkg_manager="dnf"
config_manager="dnf config-manager"
enable_channel_flag="--set-enabled"
disable_channel_flag="--set-disabled"
pre_reqs="dnf-plugins-core"
pkg_suffix="fc$dist_version"
else
pkg_manager="yum"
config_manager="yum-config-manager"
enable_channel_flag="--enable"
disable_channel_flag="--disable"
pre_reqs="yum-utils"
pkg_suffix="el"
fi
repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
(
if ! is_dry_run; then
set -x
fi
$sh_c "$pkg_manager install -y -q $pre_reqs"
$sh_c "$config_manager --add-repo $repo_file_url"
if [ "$CHANNEL" != "stable" ]; then
$sh_c "$config_manager $disable_channel_flag docker-ce-*"
$sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
fi
$sh_c "$pkg_manager makecache"
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
echo
exit 1
fi
if version_gte "18.09"; then
# older versions don't support a cli package
search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
fi
# Cut out the epoch and prefix with a '-'
pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
fi
fi
(
pkgs="docker-ce$pkg_version"
if version_gte "18.09"; then
# older versions didn't ship the cli and containerd as separate packages
if [ -n "$cli_pkg_version" ]; then
pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
else
pkgs="$pkgs docker-ce-cli containerd.io"
fi
fi
if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
# also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
pkgs="$pkgs docker-scan-plugin"
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "$pkg_manager install -y -q $pkgs"
)
echo_docker_as_nonroot
exit 0
;;
sles)
if [ "$(uname -m)" != "s390x" ]; then
echo "Packages for SLES are currently only available for s390x"
exit 1
fi
if [ "$dist_version" = "15.3" ]; then
sles_version="SLE_15_SP3"
else
sles_minor_version="${dist_version##*.}"
sles_version="15.$sles_minor_version"
fi
opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/$sles_version/security:SELinux.repo"
repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
pre_reqs="ca-certificates curl libseccomp2 awk"
(
if ! is_dry_run; then
set -x
fi
$sh_c "zypper install -y $pre_reqs"
$sh_c "zypper addrepo $repo_file_url"
if ! is_dry_run; then
cat >&2 <<-'EOF'
WARNING!!
openSUSE repository (https://download.opensuse.org/repositories/security:SELinux) will be enabled now.
Do you wish to continue?
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 30 )
fi
$sh_c "zypper addrepo $opensuse_repo"
$sh_c "zypper --gpg-auto-import-keys refresh"
$sh_c "zypper lr -d"
)
pkg_version=""
if [ -n "$VERSION" ]; then
if is_dry_run; then
echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
else
pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g")"
search_command="zypper search -s --match-exact 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
pkg_version="$($sh_c "$search_command")"
echo "INFO: Searching repository for VERSION '$VERSION'"
echo "INFO: $search_command"
if [ -z "$pkg_version" ]; then
echo
echo "ERROR: '$VERSION' not found amongst zypper list results"
echo
exit 1
fi
search_command="zypper search -s --match-exact 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
# It's okay for cli_pkg_version to be blank, since older versions don't support a cli package
cli_pkg_version="$($sh_c "$search_command")"
pkg_version="-$pkg_version"
fi
fi
(
pkgs="docker-ce$pkg_version"
if version_gte "18.09"; then
if [ -n "$cli_pkg_version" ]; then
# older versions didn't ship the cli and containerd as separate packages
pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
else
pkgs="$pkgs docker-ce-cli containerd.io"
fi
fi
if version_gte "20.10"; then
pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
fi
if version_gte "23.0"; then
pkgs="$pkgs docker-buildx-plugin"
fi
if ! is_dry_run; then
set -x
fi
$sh_c "zypper -q install -y $pkgs"
)
echo_docker_as_nonroot
exit 0
;;
*)
if [ -z "$lsb_dist" ]; then
if is_darwin; then
echo
echo "ERROR: Unsupported operating system 'macOS'"
echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
echo
exit 1
fi
fi
echo
echo "ERROR: Unsupported distribution '$lsb_dist'"
echo
exit 1
;;
esac
exit 1
}
# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
do_install


@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,445 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.2.1
- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1
- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today"
### 4.2.0
- Support for Kubernetes v1.19.0 was removed
- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0"
- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name"
- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1"
- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16"
- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process"
- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix"
- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check"
- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16"
- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0"
- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process"
- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable"
- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15"
- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2"
- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format"
- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API"
- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide"
- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs"
- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14"
- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0"
- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5"
- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement"
- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver"
- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step"
- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha"
- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path"
- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases"
- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds"
- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps"
- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)"
- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard"
- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0"
- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2"
- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos"
- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it"
- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0"
- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0"
- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3"
- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1"
### 4.1.2
- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed"
- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter"
- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart"
### 4.1.0
- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script"
- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6"
- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod"
- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty"
- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector"
- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies"
- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md"
- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing"
- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist"
- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one"
- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement"
- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation"
- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0"
### 4.0.18
- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build"
- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build"
- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge"
- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241"
- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts"
- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error"
- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation"
- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric"
- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code."
- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image"
- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests"
- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint"
- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1"
- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0"
- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint"
- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial"
- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera"
- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment"
- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation"
- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell"
- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor"
- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations"
- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0"
- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account"
- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description"
- "[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests"
- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values"
- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1"
- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs"
- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits"
- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations"
- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT"
### 4.0.15
- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1
- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6
- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs
- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors
- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release
- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P…
- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch
- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable
- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart
- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0
- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump google.golang.org/grpc from 1.41.0 to 1.43.0
- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation
- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045)
- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues
- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543
- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name
- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners
- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option
- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags
- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified
- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page
- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation
- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations
- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs
- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files
- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml
- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide
- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile
- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use k8s-staging-test-infra/gcb-docker-gcloud
- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement
- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation.
- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs
- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to default server
- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog
- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition
### 4.0.14
- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md
### 4.0.13
- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml
### 4.0.12
- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs
### 4.0.11
- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional.
### 4.0.10
- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0
### 4.0.9
- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources
### 4.0.7
- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx
- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service
### 4.0.6
- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx
- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart
- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode
- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1
### 4.0.5
- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx
### 4.0.3
- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx
### 4.0.2
- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx
### 4.0.1
- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx
### 3.34.0
- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates
### 3.33.0
- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1
### 3.32.0
- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA
### 3.31.0
- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes
### 3.30.0
- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints
### 3.29.0
- [X] [#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor
### 3.28.0
- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs
### 3.27.0
- Update ingress-nginx v0.45.0
### 3.26.0
- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics
### 3.25.0
- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken
### 3.24.0
- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment
### 3.23.0
- Update ingress-nginx v0.44.0
### 3.22.0
- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file
- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart
### 3.21.0
- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject
- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values
- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled
- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1
### 3.20.1
- Do not create KEDA in case of DaemonSets.
- Fix KEDA v2 definition
### 3.20.0
- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled.
### 3.19.0
- Update ingress-nginx v0.43.0
### 3.18.0
- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy
- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters
### 3.17.0
- Update ingress-nginx v0.42.0
### 3.16.1
- Fix chart-releaser action
### 3.16.0
- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service
### 3.15.1
- Fix chart-releaser action
### 3.15.0
- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml
### 3.14.0
- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend
### 3.13.0
- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable
### 3.12.0
- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs
### 3.11.1
- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling
### 3.11.0
- Support Keda Autoscaling
### 3.10.1
- Fix regression introduced in 0.41.0 with external authentication
### 3.10.0
- Fix routing regression introduced in 0.41.0 with PathType Exact
### 3.9.0
- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling
### 3.8.0
- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image
- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs
- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend
- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix controller service annotations
- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog
### 3.7.1
- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart
### 3.7.0
- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes [#6315](https://github.com/kubernetes/ingress-nginx/issues/6315)
### 3.6.0
- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector
### 3.5.1
- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release
### 3.5.0
- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations
### 3.4.0
- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288
### 3.3.1
- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart
- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link
- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0
- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test
### 3.3.0
- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values
- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort
- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression
- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules
### 3.0.0
- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update chart requirements
### 2.16.0
- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller
### 2.15.0
- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec
### 2.14.0
- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration
### 2.13.0
- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0
- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip
### 2.12.1
- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples
### 2.12.0
- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels
- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting
### 2.11.3
- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH
### 2.11.2
- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version
### 2.11.1
- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1
### 2.11.0
- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0
- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe
### 2.10.0
- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image
### 2.9.1
- [X] [#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls because numeric values need to be presented as strings (#5823)
### 2.9.0
- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues
### TODO
Keep building the changelog using *git log charts*, checking the tags.
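A minimal sketch of that workflow (the tag names below are examples and assume the repo's `helm-chart-X.Y.Z` tagging convention):
```console
git log --oneline helm-chart-4.2.4..helm-chart-4.2.5 -- charts/ingress-nginx
```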

View File

@@ -0,0 +1,23 @@
annotations:
  artifacthub.io/changes: |
    - "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today"
    - "fix permissions about configmap"
  artifacthub.io/prerelease: "false"
apiVersion: v2
appVersion: 1.3.1
description: Ingress controller for Kubernetes using NGINX as a reverse proxy and
  load balancer
home: https://github.com/kubernetes/ingress-nginx
icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png
keywords:
- ingress
- nginx
kubeVersion: '>=1.20.0-0'
maintainers:
- name: rikatz
- name: strongjz
- name: tao12345666333
name: ingress-nginx
sources:
- https://github.com/kubernetes/ingress-nginx
version: 4.2.5

View File

@@ -0,0 +1,10 @@
# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
approvers:
- ingress-nginx-helm-maintainers
reviewers:
- ingress-nginx-helm-reviewers
labels:
- area/helm

View File

@@ -0,0 +1,494 @@
# ingress-nginx
[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
![Version: 4.2.5](https://img.shields.io/badge/Version-4.2.5-informational?style=flat-square) ![AppVersion: 1.3.1](https://img.shields.io/badge/AppVersion-1.3.1-informational?style=flat-square)
To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Chart version 3.x.x: Kubernetes v1.16+
- Chart version 4.x.x and above: Kubernetes v1.19+
## Get Repo Info
```console
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```
## Install Chart
**Important:** only Helm 3 is supported
```console
helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
```
The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
## Upgrading Chart
```console
helm upgrade [RELEASE_NAME] [CHART] --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading With Zero Downtime in Production
By default, the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. To fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
### Migrating from stable/nginx-ingress
There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart:
1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
1. For critical services in production that require zero-downtime, you will want to:
1. [Install](#install-chart) a second Ingress controller
1. Redirect your DNS traffic from the old controller to the new controller
1. Log traffic from both controllers during this changeover
1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
Note that some configurations differ or were upgraded between the two charts, as described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you can check the current differences by running the [helm configuration](#configuration) commands on both charts, as sketched below.
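One way to do that (assuming the legacy `stable` repo is still configured locally):
```console
helm show values stable/nginx-ingress > old-values.yaml
helm show values ingress-nginx/ingress-nginx > new-values.yaml
diff old-values.yaml new-values.yaml
```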
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values ingress-nginx/ingress-nginx
```
### PodDisruptionBudget
Note that the PodDisruptionBudget resource is only defined when `replicaCount` is greater than one;
otherwise it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
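For example, a minimal values sketch that makes the chart render a PodDisruptionBudget:
```yaml
controller:
  replicaCount: 2  # with a single replica, no PodDisruptionBudget is created
```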
### Prometheus Metrics
The Nginx ingress controller can export Prometheus metrics; set `controller.metrics.enabled` to `true`.
You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled` and set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. The `release=prometheus` label should match the label configured in your Prometheus ServiceMonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
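Putting those keys together, a minimal values sketch for Prometheus Operator users (the `release: prometheus` label is an example and must match your Prometheus installation):
```yaml
controller:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: prometheus
```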
### ingress-nginx nginx\_status page/stats server
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 was replaced by a unix-socket webserver only available at localhost.
You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from the [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server
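The same check can be run from outside the pod via `kubectl exec` (the pod name and namespace below are placeholders):
```console
kubectl exec -n ingress-nginx <controller-pod> -- curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status
```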
### ExternalDNS Service Configuration
Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
```yaml
controller:
service:
annotations:
external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
```
### AWS L7 ELB with SSL Termination
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
```yaml
controller:
service:
targetPorts:
http: http
https: http
annotations:
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
```
### AWS route53-mapper
To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label:
```yaml
controller:
service:
labels:
dns: "route53"
annotations:
domainName: "kubernetes-example.com"
```
### Additional Internal Load Balancer
This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
You'll need to set both the following values:
`controller.service.internal.enabled`
`controller.service.internal.annotations`
If either of them is missing, the internal load balancer will not be deployed. For example, if you set `controller.service.internal.enabled=true` but provide no annotations, no action will be taken.
`controller.service.internal.annotations` varies with the cloud service you're using.
Example for AWS:
```yaml
controller:
service:
internal:
enabled: true
annotations:
# Create internal ELB
service.beta.kubernetes.io/aws-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
Example for GCE:
```yaml
controller:
service:
internal:
enabled: true
annotations:
# Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
# For GKE versions 1.17 and later
networking.gke.io/load-balancer-type: "Internal"
# For earlier versions
# cloud.google.com/load-balancer-type: "Internal"
# Any other annotation can be declared here.
```
Example for Azure:
```yaml
controller:
service:
annotations:
# Create internal LB
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
Example for Oracle Cloud Infrastructure:
```yaml
controller:
service:
annotations:
# Create internal LB
service.beta.kubernetes.io/oci-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
A use case for this scenario is a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one Kubernetes Ingress object.
Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
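For example (the address below is a documentation placeholder, not a real allocation):
```yaml
controller:
  service:
    loadBalancerIP: "203.0.113.10"  # pre-allocated static IP from your cloud provider
```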
### Ingress Admission Webhooks
With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that integrates with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingresses from being added to the cluster.
**This feature is enabled by default since 0.31.0.**
nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521).
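To confirm the webhook is registered in your cluster (assuming the release name contains `ingress-nginx`):
```console
kubectl get validatingwebhookconfigurations | grep ingress-nginx
```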
### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
If you are upgrading this chart from a version between 0.31.0 and 1.2.2, you may get an error like this:
```console
Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
```
Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error (see the sketch below).
As of version `1.26.0` of this chart, simply not providing any clusterIP value means `clusterIP: ""` is no longer rendered, so the `invalid: spec.clusterIP: Invalid value: "": field is immutable` error no longer occurs.
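A values sketch of that workaround (replace `controller` with whichever service the error names):
```yaml
controller:
  service:
    omitClusterIP: true
```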
## Requirements
Kubernetes: `>=1.20.0-0`
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| commonLabels | object | `{}` | |
| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers |
| controller.admissionWebhooks.annotations | object | `{}` | |
| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | |
| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | |
| controller.admissionWebhooks.enabled | bool | `true` | |
| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set |
| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use |
| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | |
| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks |
| controller.admissionWebhooks.namespaceSelector | object | `{}` | |
| controller.admissionWebhooks.networkPolicyEnabled | bool | `false` | |
| controller.admissionWebhooks.objectSelector | object | `{}` | |
| controller.admissionWebhooks.patch.enabled | bool | `true` | |
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"` | |
| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | |
| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | |
| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | |
| controller.admissionWebhooks.patch.image.tag | string | `"v1.3.0"` | |
| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources |
| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | |
| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | |
| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # |
| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | |
| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | |
| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | |
| controller.admissionWebhooks.patch.tolerations | list | `[]` | |
| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | |
| controller.admissionWebhooks.port | int | `8443` | |
| controller.admissionWebhooks.service.annotations | object | `{}` | |
| controller.admissionWebhooks.service.externalIPs | list | `[]` | |
| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | |
| controller.admissionWebhooks.service.servicePort | int | `443` | |
| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | |
| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity # |
| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected |
| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet # |
| controller.autoscaling.behavior | object | `{}` | |
| controller.autoscaling.enabled | bool | `false` | |
| controller.autoscaling.maxReplicas | int | `11` | |
| controller.autoscaling.minReplicas | int | `1` | |
| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | |
| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | |
| controller.autoscalingTemplate | list | `[]` | |
| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ |
| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. |
| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) |
| controller.containerName | string | `"controller"` | Configures the controller container name |
| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on |
| controller.customTemplate.configMapKey | string | `""` | |
| controller.customTemplate.configMapName | string | `""` | |
| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. |
| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. |
| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update |
| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. # ref: https://github.com/microsoft/mimalloc # |
| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller, e.g. to specify the default SSL certificate you can use `default-ssl-certificate: "<namespace>/<secret_name>"` (see the sketch after this table) |
| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. |
| controller.extraEnvs | list | `[]` | Additional environment variables to set |
| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. |
| controller.extraModules | list | `[]` | |
| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. |
| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. |
| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. |
| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. |
| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged |
| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not |
| controller.hostPort.ports.http | int | `80` | 'hostPort' http port |
| controller.hostPort.ports.https | int | `443` | 'hostPort' https port |
| controller.hostname | object | `{}` | Optionally customize the pod hostname. |
| controller.image.allowPrivilegeEscalation | bool | `true` | |
| controller.image.chroot | bool | `false` | |
| controller.image.digest | string | `"sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"` | |
| controller.image.digestChroot | string | `"sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1"` | |
| controller.image.image | string | `"ingress-nginx/controller"` | |
| controller.image.pullPolicy | string | `"IfNotPresent"` | |
| controller.image.registry | string | `"registry.k8s.io"` | |
| controller.image.runAsUser | int | `101` | |
| controller.image.tag | string | `"v1.3.1"` | |
| controller.ingressClass | string | `"nginx"` | For backwards compatibility with the ingress.class annotation, use ingressClass. The algorithm is as follows: first, ingressClassName is considered; if not present, the controller looks for the ingress.class annotation |
| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). |
| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass |
| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster |
| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not |
| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass |
| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. |
| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | |
| controller.keda.behavior | object | `{}` | |
| controller.keda.cooldownPeriod | int | `300` | |
| controller.keda.enabled | bool | `false` | |
| controller.keda.maxReplicas | int | `11` | |
| controller.keda.minReplicas | int | `1` | |
| controller.keda.pollingInterval | int | `30` | |
| controller.keda.restoreToOriginalReplicaCount | bool | `false` | |
| controller.keda.scaledObject.annotations | object | `{}` | |
| controller.keda.triggers | list | `[]` | |
| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` |
| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have the option to specify labels # |
| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. # |
| controller.livenessProbe.failureThreshold | int | `5` | |
| controller.livenessProbe.httpGet.path | string | `"/healthz"` | |
| controller.livenessProbe.httpGet.port | int | `10254` | |
| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | |
| controller.livenessProbe.initialDelaySeconds | int | `10` | |
| controller.livenessProbe.periodSeconds | int | `10` | |
| controller.livenessProbe.successThreshold | int | `1` | |
| controller.livenessProbe.timeoutSeconds | int | `1` | |
| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases |
| controller.metrics.enabled | bool | `false` | |
| controller.metrics.port | int | `10254` | |
| controller.metrics.prometheusRule.additionalLabels | object | `{}` | |
| controller.metrics.prometheusRule.enabled | bool | `false` | |
| controller.metrics.prometheusRule.rules | list | `[]` | |
| controller.metrics.service.annotations | object | `{}` | |
| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | |
| controller.metrics.service.servicePort | int | `10254` | |
| controller.metrics.service.type | string | `"ClusterIP"` | |
| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | |
| controller.metrics.serviceMonitor.enabled | bool | `false` | |
| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | |
| controller.metrics.serviceMonitor.namespace | string | `""` | |
| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | |
| controller.metrics.serviceMonitor.relabelings | list | `[]` | |
| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | |
| controller.metrics.serviceMonitor.targetLabels | list | `[]` | |
| controller.minAvailable | int | `1` | |
| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # |
| controller.name | string | `"controller"` | |
| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # |
| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods # |
| controller.podLabels | object | `{}` | Labels to add to the pod container metadata |
| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods |
| controller.priorityClassName | string | `""` | |
| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers |
| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disabled, the status field reports the IP address of the node or nodes where an ingress controller pod is running. |
| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not |
| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be <namespace>/<service_name> |
| controller.readinessProbe.failureThreshold | int | `3` | |
| controller.readinessProbe.httpGet.path | string | `"/healthz"` | |
| controller.readinessProbe.httpGet.port | int | `10254` | |
| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | |
| controller.readinessProbe.initialDelaySeconds | int | `10` | |
| controller.readinessProbe.periodSeconds | int | `10` | |
| controller.readinessProbe.successThreshold | int | `1` | |
| controller.readinessProbe.timeoutSeconds | int | `1` | |
| controller.replicaCount | int | `1` | |
| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply |
| controller.resources.requests.cpu | string | `"100m"` | |
| controller.resources.requests.memory | string | `"90Mi"` | |
| controller.scope.enabled | bool | `false` | Enable 'scope' or not |
| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) |
| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, watch only namespaces whose labels match the namespaceSelector. Format like foo=bar. Defaults to empty, meaning all namespaces are watched. |
| controller.service.annotations | object | `{}` | |
| controller.service.appProtocol | bool | `true` | If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces the annotations that were used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http. It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244. Will be ignored for Kubernetes versions older than 1.20 # |
| controller.service.enableHttp | bool | `true` | |
| controller.service.enableHttps | bool | `true` | |
| controller.service.enabled | bool | `true` | |
| controller.service.external.enabled | bool | `true` | |
| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. |
| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). |
| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access for the LoadBalancer service. Defaults to 0.0.0.0/0. |
| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
| controller.service.labels | object | `{}` | |
| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer |
| controller.service.loadBalancerSourceRanges | list | `[]` | |
| controller.service.nodePorts.http | string | `""` | |
| controller.service.nodePorts.https | string | `""` | |
| controller.service.nodePorts.tcp | object | `{}` | |
| controller.service.nodePorts.udp | object | `{}` | |
| controller.service.ports.http | int | `80` | |
| controller.service.ports.https | int | `443` | |
| controller.service.targetPorts.http | string | `"http"` | |
| controller.service.targetPorts.https | string | `"https"` | |
| controller.service.type | string | `"LoadBalancer"` | |
| controller.shareProcessNamespace | bool | `false` | |
| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap |
| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) |
| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready # wait up to five minutes for the drain of connections # |
| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # |
| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. # Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ # |
| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap |
| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) |
| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # |
| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false |
| defaultBackend.affinity | object | `{}` | |
| defaultBackend.autoscaling.annotations | object | `{}` | |
| defaultBackend.autoscaling.enabled | bool | `false` | |
| defaultBackend.autoscaling.maxReplicas | int | `2` | |
| defaultBackend.autoscaling.minReplicas | int | `1` | |
| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | |
| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | |
| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # |
| defaultBackend.enabled | bool | `false` | |
| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| defaultBackend.extraArgs | object | `{}` | |
| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods |
| defaultBackend.extraVolumeMounts | list | `[]` | |
| defaultBackend.extraVolumes | list | `[]` | |
| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | |
| defaultBackend.image.image | string | `"defaultbackend-amd64"` | |
| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | |
| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | |
| defaultBackend.image.registry | string | `"registry.k8s.io"` | |
| defaultBackend.image.runAsNonRoot | bool | `true` | |
| defaultBackend.image.runAsUser | int | `65534` | |
| defaultBackend.image.tag | string | `"1.5"` | |
| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources |
| defaultBackend.livenessProbe.failureThreshold | int | `3` | |
| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | |
| defaultBackend.livenessProbe.periodSeconds | int | `10` | |
| defaultBackend.livenessProbe.successThreshold | int | `1` | |
| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | |
| defaultBackend.minAvailable | int | `1` | |
| defaultBackend.name | string | `"defaultbackend"` | |
| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment # Ref: https://kubernetes.io/docs/user-guide/node-selection/ # |
| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods # |
| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata |
| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls # |
| defaultBackend.port | int | `8080` | |
| defaultBackend.priorityClassName | string | `""` | |
| defaultBackend.readinessProbe.failureThreshold | int | `6` | |
| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | |
| defaultBackend.readinessProbe.periodSeconds | int | `5` | |
| defaultBackend.readinessProbe.successThreshold | int | `1` | |
| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | |
| defaultBackend.replicaCount | int | `1` | |
| defaultBackend.resources | object | `{}` | |
| defaultBackend.service.annotations | object | `{}` | |
| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/user-guide/services/#external-ips # |
| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | |
| defaultBackend.service.servicePort | int | `80` | |
| defaultBackend.service.type | string | `"ClusterIP"` | |
| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | |
| defaultBackend.serviceAccount.create | bool | `true` | |
| defaultBackend.serviceAccount.name | string | `""` | |
| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # |
| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param |
| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ |
| podSecurityPolicy.enabled | bool | `false` | |
| portNamePrefix | string | `""` | Prefix for TCP and UDP port names in the ingress controller service. Some cloud providers, like Yandex Cloud, may impose a port-name regex that must be matched to support cloud load balancer integration |
| rbac.create | bool | `true` | |
| rbac.scope | bool | `false` | |
| revisionHistoryLimit | int | `10` | Rollback limit |
| serviceAccount.annotations | object | `{}` | Annotations for the controller service account |
| serviceAccount.automountServiceAccountToken | bool | `true` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
| tcp | object | `{}` | TCP service key-value pairs. Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md |
| udp | object | `{}` | UDP service key-value pairs. Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md |

View File

@@ -0,0 +1,235 @@
{{ template "chart.header" . }}
[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
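For example, a minimal Ingress sketch that selects this controller (the backend Service name `my-service` is a placeholder):
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example
spec:
  ingressClassName: nginx  # routes this Ingress through ingress-nginx
  rules:
    - host: example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-service  # placeholder backend Service
                port:
                  number: 80
```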
This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Chart version 3.x.x: Kubernetes v1.16+
- Chart version 4.x.x and above: Kubernetes v1.19+
## Get Repo Info
```console
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```
## Install Chart
**Important:** only Helm 3 is supported
```console
helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
```
The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Uninstall Chart
```console
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
## Upgrading Chart
```console
helm upgrade [RELEASE_NAME] [CHART] --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading With Zero Downtime in Production
By default, the ingress-nginx controller suffers service interruptions whenever its pods are restarted or redeployed. To avoid this, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
### Migrating from stable/nginx-ingress
There are two main ways to migrate a release from the `stable/nginx-ingress` chart to the `ingress-nginx/ingress-nginx` chart:
1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
1. For critical services in production that require zero-downtime, you will want to:
1. [Install](#install-chart) a second Ingress controller
1. Redirect your DNS traffic from the old controller to the new controller
1. Log traffic from both controllers during this changeover
1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
Note that some configuration options differ or were renamed between the two charts, as described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to evolve, you will want to check the current differences by running the [configuration](#configuration) commands on both charts.
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
helm show values ingress-nginx/ingress-nginx
```
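For example, a custom values file passed to `helm install -f values.yaml` could look like the following sketch (the overrides shown are illustrative, not required):
```yaml
controller:
  kind: DaemonSet               # run one controller pod per node instead of a Deployment
  config:
    use-proxy-protocol: "true"  # an ingress-nginx ConfigMap option; see values.yaml
```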
### PodDisruptionBudget
Note that the PodDisruptionBudget resource is only created when `replicaCount` is greater than one;
otherwise it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
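For example, a values sketch under which the chart renders a PodDisruptionBudget:
```yaml
controller:
  replicaCount: 2  # the PDB is only created when replicaCount > 1
```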
### Prometheus Metrics
The Nginx ingress controller can export Prometheus metrics by setting `controller.metrics.enabled` to `true`.
You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation by setting `controller.metrics.serviceMonitor.enabled` to `true` and `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. The `release=prometheus` label must match the label configured in the Prometheus ServiceMonitor selector (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
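A values sketch combining these options (the `release: prometheus` label is an assumption and must match your Prometheus Operator's selector):
```yaml
controller:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: prometheus  # must match the label your Prometheus instance selects on
```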
### ingress-nginx nginx\_status page/stats server
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 was replaced by a Unix socket webserver that is only available on localhost.
You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from the [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the HTTP server
### ExternalDNS Service Configuration
Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
```yaml
controller:
  service:
    annotations:
      external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
```
### AWS L7 ELB with SSL Termination
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
```yaml
controller:
  service:
    targetPorts:
      http: http
      https: http
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
```
### AWS route53-mapper
To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/blob/be63d4f1a7a46daaf1c4c482527328236850f111/addons/route53-mapper/README.md), add the `domainName` annotation and `dns` label:
```yaml
controller:
  service:
    labels:
      dns: "route53"
    annotations:
      domainName: "kubernetes-example.com"
```
### Additional Internal Load Balancer
This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
You'll need to set both of the following values:
- `controller.service.internal.enabled`
- `controller.service.internal.annotations`
If either of them is missing, the internal load balancer will not be deployed. For example, if you set `controller.service.internal.enabled=true` but provide no annotations, no action will be taken.
`controller.service.internal.annotations` varies with the cloud service you're using.
Example for AWS:
```yaml
controller:
  service:
    internal:
      enabled: true
      annotations:
        # Create internal ELB
        service.beta.kubernetes.io/aws-load-balancer-internal: "true"
        # Any other annotation can be declared here.
```
Example for GCE:
```yaml
controller:
  service:
    internal:
      enabled: true
      annotations:
        # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
        # For GKE versions 1.17 and later
        networking.gke.io/load-balancer-type: "Internal"
        # For earlier versions
        # cloud.google.com/load-balancer-type: "Internal"
        # Any other annotation can be declared here.
```
Example for Azure:
```yaml
controller:
  service:
    annotations:
      # Create internal LB
      service.beta.kubernetes.io/azure-load-balancer-internal: "true"
      # Any other annotation can be declared here.
```
Example for Oracle Cloud Infrastructure:
```yaml
controller:
  service:
    annotations:
      # Create internal LB
      service.beta.kubernetes.io/oci-load-balancer-internal: "true"
      # Any other annotation can be declared here.
```
A use case for this scenario is a split-view DNS setup where the public-zone CNAME records point to the external balancer URL while the private-zone CNAME records point to the internal balancer URL. This way, you only need one Kubernetes Ingress object.
Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
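For example (the address below is a placeholder from the documentation range; your cloud provider must be able to assign it):
```yaml
controller:
  service:
    loadBalancerIP: 203.0.113.10  # placeholder static IP
```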
### Ingress Admission Webhooks
With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
**This feature is enabled by default since 0.31.0.**
The nginx-ingress-controller 0.25.* releases work only with Kubernetes 1.14+; 0.26 fixed [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
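If the webhook causes problems in your environment, it can be switched off via values, as in this minimal sketch:
```yaml
controller:
  admissionWebhooks:
    enabled: false  # skips the ValidatingWebhookConfiguration and the certgen jobs
```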
### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
```console
Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
```
Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error.
As of version `1.26.0` of this chart, simply not providing any clusterIP value avoids the `invalid: spec.clusterIP: Invalid value: "": field is immutable` error, since `clusterIP: ""` is no longer rendered.
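For affected chart versions, the workaround above translates to a values sketch like this (assuming the error names the controller service; substitute the service referenced in your error):
```yaml
controller:
  service:
    omitClusterIP: true  # only relevant on chart versions that still render clusterIP: ""
```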
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
{{ template "helm-docs.versionFooter" . }}

View File

@@ -0,0 +1,7 @@
controller:
  watchIngressWithoutClass: true
  ingressClassResource:
    name: custom-nginx
    enabled: true
    default: true
    controllerValue: "k8s.io/custom-nginx"

View File

@@ -0,0 +1,14 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  kind: DaemonSet
  allowSnippetAnnotations: false
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP
  config:
    use-proxy-protocol: "true"

View File

@@ -0,0 +1,22 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: NodePort
    nodePorts:
      tcp:
        9000: 30090
      udp:
        9001: 30091

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

View File

@@ -0,0 +1,10 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
  service:
    type: ClusterIP
  extraModules:
    - name: opentelemetry
      image: busybox

View File

@@ -0,0 +1,14 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  addHeaders:
    X-Frame-Options: deny
  proxySetHeaders:
    X-Forwarded-Proto: https
  service:
    type: ClusterIP

View File

@@ -0,0 +1,14 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP
    internal:
      enabled: true
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "true"

View File

@@ -0,0 +1,10 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: NodePort

View File

@@ -0,0 +1,17 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  metrics:
    enabled: true
  service:
    type: ClusterIP
  podAnnotations:
    prometheus.io/path: /metrics
    prometheus.io/port: "10254"
    prometheus.io/scheme: http
    prometheus.io/scrape: "true"

View File

@@ -0,0 +1,20 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP
  tcp:
    configMapNamespace: default
  udp:
    configMapNamespace: default

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

View File

@@ -0,0 +1,18 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

portNamePrefix: "port"

View File

@@ -0,0 +1,16 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

View File

@@ -0,0 +1,14 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

tcp:
  9000: "default/test:8080"
  9001: "default/test:8080"

View File

@@ -0,0 +1,10 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

View File

@@ -0,0 +1,12 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  metrics:
    enabled: true
  service:
    type: ClusterIP

View File

@@ -0,0 +1,13 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

podSecurityPolicy:
  enabled: true

View File

@@ -0,0 +1,13 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: true
  service:
    type: ClusterIP

podSecurityPolicy:
  enabled: true

View File

@@ -0,0 +1,10 @@
controller:
  kind: DaemonSet
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: true
  service:
    type: ClusterIP

View File

@@ -0,0 +1,14 @@
controller:
  autoscaling:
    enabled: true
    behavior:
      scaleDown:
        stabilizationWindowSeconds: 300
        policies:
          - type: Pods
            value: 1
            periodSeconds: 180
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

View File

@@ -0,0 +1,11 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  autoscaling:
    enabled: true
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

View File

@@ -0,0 +1,12 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  config:
    use-proxy-protocol: "true"
  allowSnippetAnnotations: false
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

View File

@@ -0,0 +1,20 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: NodePort
    nodePorts:
      tcp:
        9000: 30090
      udp:
        9001: 30091

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

View File

@@ -0,0 +1,8 @@
# Left blank to test default values
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  service:
    type: ClusterIP

View File

@@ -0,0 +1,10 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  service:
    type: ClusterIP
  extraModules:
    - name: opentelemetry
      image: busybox

View File

@@ -0,0 +1,13 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  addHeaders:
    X-Frame-Options: deny
  proxySetHeaders:
    X-Forwarded-Proto: https
  service:
    type: ClusterIP

View File

@@ -0,0 +1,13 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP
    internal:
      enabled: true
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "true"

View File

@@ -0,0 +1,11 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  metrics:
    enabled: true
  service:
    type: ClusterIP

View File

@@ -0,0 +1,9 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: NodePort

View File

@@ -0,0 +1,16 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  metrics:
    enabled: true
  service:
    type: ClusterIP
  podAnnotations:
    prometheus.io/path: /metrics
    prometheus.io/port: "10254"
    prometheus.io/scheme: http
    prometheus.io/scrape: "true"

View File

@@ -0,0 +1,10 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  service:
    type: ClusterIP

podSecurityPolicy:
  enabled: true

View File

@@ -0,0 +1,19 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP
  tcp:
    configMapNamespace: default
  udp:
    configMapNamespace: default

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

View File

@@ -0,0 +1,17 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

portNamePrefix: "port"

View File

@@ -0,0 +1,15 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: false
  service:
    type: ClusterIP

tcp:
  9000: "default/test:8080"

udp:
  9001: "default/test:8080"

View File

@@ -0,0 +1,11 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  service:
    type: ClusterIP

tcp:
  9000: "default/test:8080"
  9001: "default/test:8080"

View File

@@ -0,0 +1,12 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: true
  service:
    type: ClusterIP

podSecurityPolicy:
  enabled: true

View File

@@ -0,0 +1,12 @@
controller:
  service:
    type: ClusterIP
  admissionWebhooks:
    enabled: true
    extraEnvs:
      - name: FOO
        value: foo
      - name: TEST
        value: test
    patch:
      enabled: true

View File

@@ -0,0 +1,23 @@
controller:
  service:
    type: ClusterIP
  admissionWebhooks:
    enabled: true
    createSecretJob:
      resources:
        limits:
          cpu: 10m
          memory: 20Mi
        requests:
          cpu: 10m
          memory: 20Mi
    patchWebhookJob:
      resources:
        limits:
          cpu: 10m
          memory: 20Mi
        requests:
          cpu: 10m
          memory: 20Mi
    patch:
      enabled: true

View File

@@ -0,0 +1,9 @@
controller:
  image:
    repository: ingress-controller/controller
    tag: 1.0.0-dev
    digest: null
  admissionWebhooks:
    enabled: true
  service:
    type: ClusterIP

View File

@@ -0,0 +1,10 @@
controller:
kind: DaemonSet
service:
type: LoadBalancer
nodePorts:
http: "30000"
https: "30001"
tcp: {}
udp: {}

View File

@@ -0,0 +1,724 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx
namespace: default
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller
namespace: default
data:
allow-snippet-annotations: "true"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: release-name-ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: release-name-ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: release-name-ingress-nginx
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx
namespace: "default"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx
namespace: default
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
# TODO(Jintao Zhang)
# Once we release a new version of the controller,
# we will be able to remove the configmap related permissions
# We have used the Lease API for selection
# ref: https://github.com/kubernetes/ingress-nginx/pull/8921
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: release-name-ingress-nginx
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx
namespace: "default"
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller-admission
namespace: default
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller
namespace: default
spec:
type: LoadBalancer
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv4
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller
namespace: default
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
replicas: 1
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: release-name-ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: release-name-ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: release-name-ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: "default"
name: release-name-ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: release-name-ingress-nginx-admission
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: release-name-ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: release-name-ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: release-name-ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx-admission
namespace: "default"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: release-name-ingress-nginx-admission
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: release-name-ingress-nginx-admission
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: release-name-ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx-admission
namespace: "default"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: release-name-ingress-nginx-admission-create
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: release-name-ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"
imagePullPolicy: IfNotPresent
args:
- create
- --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=release-name-ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: release-name-ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: release-name-ingress-nginx-admission-patch
namespace: default
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: release-name-ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=release-name-ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=release-name-ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: release-name-ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000

View File

@@ -0,0 +1,725 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx
namespace: default
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller
namespace: default
data:
allow-snippet-annotations: "true"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: release-name-ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: release-name-ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: release-name-ingress-nginx
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx
namespace: "default"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx
namespace: default
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
# TODO(Jintao Zhang)
# Once we release a new version of the controller,
# we will be able to remove the configmap related permissions
# We have used the Lease API for selection
# ref: https://github.com/kubernetes/ingress-nginx/pull/8921
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: release-name-ingress-nginx
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx
namespace: "default"
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller-admission
namespace: default
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller
namespace: default
spec:
type: LoadBalancer
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv4
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
nodePort: 30000
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
nodePort: 30001
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: release-name-ingress-nginx-controller
namespace: default
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.3.1@sha256:54f7fe2c6c5a9db9a0ebf1131797109bb7a4d91f56b9b362bde2abd237dd1974"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --publish-service=$(POD_NAMESPACE)/release-name-ingress-nginx-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/release-name-ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: release-name-ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: release-name-ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: release-name-ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: "default"
name: release-name-ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: release-name-ingress-nginx-admission
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: release-name-ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: release-name-ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: release-name-ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx-admission
namespace: "default"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: release-name-ingress-nginx-admission
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: release-name-ingress-nginx-admission
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: release-name-ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: release-name-ingress-nginx-admission
namespace: "default"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: release-name-ingress-nginx-admission-create
namespace: default
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: release-name-ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"
imagePullPolicy: IfNotPresent
args:
- create
- --host=release-name-ingress-nginx-controller-admission,release-name-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=release-name-ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: release-name-ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: release-name-ingress-nginx-admission-patch
namespace: default
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: release-name-ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.2.5
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47"
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=release-name-ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=release-name-ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: release-name-ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000

View File

@@ -0,0 +1,80 @@
The ingress-nginx controller has been installed.
{{- if contains "NodePort" .Values.controller.service.type }}
Get the application URL by running these commands:
{{- if (not (empty .Values.controller.service.nodePorts.http)) }}
export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }}
{{- else }}
export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }})
{{- end }}
{{- if (not (empty .Values.controller.service.nodePorts.https)) }}
export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }}
{{- else }}
export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }})
{{- end }}
export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}")
echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP."
echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS."
{{- else if contains "LoadBalancer" .Values.controller.service.type }}
It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . }}'
{{- else if contains "ClusterIP" .Values.controller.service.type }}
Get the application URL by running these commands:
export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
echo "Visit http://127.0.0.1:8080 to access your application."
{{- end }}
An example Ingress that makes use of the controller:
{{- $isV1 := semverCompare ">=1" .Chart.AppVersion}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example
namespace: foo
{{- if eq $isV1 false }}
annotations:
kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }}
{{- end }}
spec:
{{- if $isV1 }}
ingressClassName: {{ .Values.controller.ingressClassResource.name }}
{{- end }}
rules:
- host: www.example.com
http:
paths:
- pathType: Prefix
backend:
service:
name: exampleService
port:
number: 80
path: /
# This section is only required if TLS is to be enabled for the Ingress
tls:
- hosts:
- www.example.com
secretName: example-tls
If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
apiVersion: v1
kind: Secret
metadata:
name: example-tls
namespace: foo
data:
tls.crt: <base64 encoded cert>
tls.key: <base64 encoded key>
type: kubernetes.io/tls
{{- if .Values.controller.headers }}
#################################################################################
###### WARNING: `controller.headers` has been deprecated! #####
###### It has been renamed to `controller.proxySetHeaders`. #####
#################################################################################
{{- end }}

View File

@@ -0,0 +1,185 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ingress-nginx.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ingress-nginx.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "ingress-nginx.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Container SecurityContext.
*/}}
{{- define "controller.containerSecurityContext" -}}
{{- if .Values.controller.containerSecurityContext -}}
{{- toYaml .Values.controller.containerSecurityContext -}}
{{- else -}}
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
{{- if .Values.controller.image.chroot }}
- SYS_CHROOT
{{- end }}
runAsUser: {{ .Values.controller.image.runAsUser }}
allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }}
{{- end }}
{{- end -}}
{{/*
Get specific image
*/}}
{{- define "ingress-nginx.image" -}}
{{- if .chroot -}}
{{- printf "%s-chroot" .image -}}
{{- else -}}
{{- printf "%s" .image -}}
{{- end }}
{{- end -}}
{{/*
Get specific image digest
*/}}
{{- define "ingress-nginx.imageDigest" -}}
{{- if .chroot -}}
{{- if .digestChroot -}}
{{- printf "@%s" .digestChroot -}}
{{- end }}
{{- else -}}
{{ if .digest -}}
{{- printf "@%s" .digest -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified controller name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "ingress-nginx.controller.fullname" -}}
{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Construct the path for the publish-service.
By convention this simply uses <namespace>/<controller-name>, matching the name of the
generated service.
Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride`
*/}}
{{- define "ingress-nginx.controller.publishServicePath" -}}
{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}}
{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }}
{{- print $servicePath | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified default backend name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "ingress-nginx.defaultBackend.fullname" -}}
{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "ingress-nginx.labels" -}}
helm.sh/chart: {{ include "ingress-nginx.chart" . }}
{{ include "ingress-nginx.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "ingress-nginx.selectorLabels" -}}
app.kubernetes.io/name: {{ include "ingress-nginx.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the controller service account to use
*/}}
{{- define "ingress-nginx.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "ingress-nginx.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled
*/}}
{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}}
{{- if .Values.defaultBackend.serviceAccount.create -}}
{{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }}
{{- else -}}
{{ default "default-backend" .Values.defaultBackend.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiGroup for PodSecurityPolicy.
*/}}
{{- define "podSecurityPolicy.apiGroup" -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "policy" -}}
{{- else -}}
{{- print "extensions" -}}
{{- end -}}
{{- end -}}
{{/*
Check that the ingress controller version tag is at most three versions behind the last release
*/}}
{{- define "isControllerTagValid" -}}
{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}}
{{- fail "Controller container image tag should be 0.27.0 or higher" -}}
{{- end -}}
{{- end -}}
{{/*
IngressClass parameters.
*/}}
{{- define "ingressClass.parameters" -}}
{{- if .Values.controller.ingressClassResource.parameters -}}
parameters:
{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}}
{{ end }}
{{- end -}}
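These helpers drive every resource name in the rendered manifests earlier in this diff. A worked example with the defaults (no nameOverride or fullnameOverride, placeholder release name "release-name"):

    .Chart.Name                                   -> ingress-nginx
    include "ingress-nginx.fullname" .            -> release-name-ingress-nginx
    include "ingress-nginx.controller.fullname" . -> release-name-ingress-nginx-controller
    fullname suffixed with "-admission"           -> release-name-ingress-nginx-admission
    (compare the admission Job and ServiceAccount names at the top of this section)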

View File

@@ -0,0 +1,62 @@
{{- define "ingress-nginx.params" -}}
- /nginx-ingress-controller
{{- if .Values.defaultBackend.enabled }}
- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }}
{{- end }}
{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }}
{{- if .Values.controller.service.external.enabled }}
- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}
{{- else if .Values.controller.service.internal.enabled }}
- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}-internal
{{- end }}
{{- end }}
- --election-id={{ .Values.controller.electionID }}
- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }}
{{- if .Values.controller.ingressClass }}
- --ingress-class={{ .Values.controller.ingressClass }}
{{- end }}
- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }}
{{- if .Values.tcp }}
- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp
{{- end }}
{{- if .Values.udp }}
- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-udp
{{- end }}
{{- if .Values.controller.scope.enabled }}
- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }}
{{- end }}
{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }}
- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }}
{{- end }}
{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }}
- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.enabled }}
- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }}
- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }}
- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }}
{{- end }}
{{- if .Values.controller.maxmindLicenseKey }}
- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }}
{{- end }}
{{- if .Values.controller.healthCheckHost }}
- --healthz-host={{ .Values.controller.healthCheckHost }}
{{- end }}
{{- if not (eq .Values.controller.healthCheckPath "/healthz") }}
- --health-check-path={{ .Values.controller.healthCheckPath }}
{{- end }}
{{- if .Values.controller.ingressClassByName }}
- --ingress-class-by-name=true
{{- end }}
{{- if .Values.controller.watchIngressWithoutClass }}
- --watch-ingress-without-class=true
{{- end }}
{{- range $key, $value := .Values.controller.extraArgs }}
{{- /* Accept keys without values or with false as value */}}
{{- if eq ($value | quote | len) 2 }}
- --{{ $key }}
{{- else }}
- --{{ $key }}={{ $value }}
{{- end }}
{{- end }}
{{- end -}}
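Note the final range over controller.extraArgs: an empty value quotes to "" (length 2), so such keys render as bare flags, while anything else renders as key=value. A minimal sketch with hypothetical values:

    controller:
      extraArgs:
        enable-ssl-passthrough: ""                 # empty value -> bare flag
        default-ssl-certificate: "default/my-tls"  # non-empty -> key=value

    # renders as
    - --enable-ssl-passthrough
    - --default-ssl-certificate=default/my-tls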

View File

@@ -0,0 +1,34 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
{{- if .Values.podSecurityPolicy.enabled }}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
{{- with .Values.controller.admissionWebhooks.existingPsp }}
- {{ . }}
{{- else }}
- {{ include "ingress-nginx.fullname" . }}-admission
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,23 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "ingress-nginx.fullname" . }}-admission
subjects:
- kind: ServiceAccount
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace | quote }}
{{- end }}

View File

@@ -0,0 +1,79 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission-create
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
{{- with .Values.controller.admissionWebhooks.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
# Alpha feature since k8s 1.12
ttlSecondsAfterFinished: 0
{{- end }}
template:
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission-create
{{- if .Values.controller.admissionWebhooks.patch.podAnnotations }}
annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 8 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: create
{{- with .Values.controller.admissionWebhooks.patch.image }}
image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
args:
- create
- --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name={{ include "ingress-nginx.fullname" . }}-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.controller.admissionWebhooks.extraEnvs }}
{{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }}
{{- end }}
securityContext:
allowPrivilegeEscalation: false
{{- if .Values.controller.admissionWebhooks.createSecretJob.resources }}
resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }}
{{- end }}
restartPolicy: OnFailure
serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission
{{- if .Values.controller.admissionWebhooks.patch.nodeSelector }}
nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patch.tolerations }}
tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patch.securityContext }}
securityContext:
{{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }}
{{- end }}
{{- end }}
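The image expression above stitches together either an explicit repository or registry/image, then the tag and an optional digest; the {{- ... -}} markers trim the surrounding whitespace, so the digest is appended with no space. With the values used for the rendered Jobs at the top of this section, it yields:

    registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0@sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47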

View File

@@ -0,0 +1,81 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission-patch
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
{{- with .Values.controller.admissionWebhooks.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
# Alpha feature since k8s 1.12
ttlSecondsAfterFinished: 0
{{- end }}
template:
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission-patch
{{- if .Values.controller.admissionWebhooks.patch.podAnnotations }}
annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 8 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: patch
{{- with .Values.controller.admissionWebhooks.patch.image }}
image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
args:
- patch
- --webhook-name={{ include "ingress-nginx.fullname" . }}-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name={{ include "ingress-nginx.fullname" . }}-admission
- --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.controller.admissionWebhooks.extraEnvs }}
{{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }}
{{- end }}
securityContext:
allowPrivilegeEscalation: false
{{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }}
resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }}
{{- end }}
restartPolicy: OnFailure
serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission
{{- if .Values.controller.admissionWebhooks.patch.nodeSelector }}
nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patch.tolerations }}
tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patch.securityContext }}
securityContext:
{{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,39 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
allowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
requiredDropCapabilities:
- ALL
runAsUser:
rule: MustRunAsNonRoot
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "ingress-nginx.fullname" . }}-admission
subjects:
- kind: ServiceAccount
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace | quote }}
{{- end }}

View File

@@ -0,0 +1,16 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,48 @@
{{- if .Values.controller.admissionWebhooks.enabled -}}
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
{{- if .Values.controller.admissionWebhooks.annotations }}
annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.fullname" . }}-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }}
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: {{ .Release.Namespace | quote }}
name: {{ include "ingress-nginx.controller.fullname" . }}-admission
path: /networking/v1/ingresses
{{- if .Values.controller.admissionWebhooks.timeoutSeconds }}
timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.namespaceSelector }}
namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.objectSelector }}
objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }}
{{- end }}
{{- end }}
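Once installed, the configuration is registered under the fullname-admission name, so a quick sanity check (a sketch, assuming the default release name) is:

    kubectl get validatingwebhookconfiguration release-name-ingress-nginx-admission
    # CREATE/UPDATE of Ingress objects is then routed to the controller's
    # /networking/v1/ingresses endpoint; with the default failurePolicy of Fail,
    # an Ingress the controller rejects is never stored.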

View File

@@ -0,0 +1,94 @@
{{- if .Values.rbac.create }}
{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}}
{{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }}
{{- end }}
{{- if not .Values.rbac.scope -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.fullname" . }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
{{- if not .Values.controller.scope.enabled }}
- namespaces
{{- end}}
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }}
- apiGroups:
- ""
resources:
- namespaces
resourceNames:
- "{{ .Values.controller.scope.namespace }}"
verbs:
- get
{{- end }}
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
{{- end }}
{{- end }}

View File

@@ -0,0 +1,19 @@
{{- if and .Values.rbac.create (not .Values.rbac.scope) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.fullname" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "ingress-nginx.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "ingress-nginx.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}

View File

@@ -0,0 +1,14 @@
{{- if .Values.controller.addHeaders -}}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers
namespace: {{ .Release.Namespace }}
data: {{ toYaml .Values.controller.addHeaders | nindent 2 }}
{{- end }}

View File

@@ -0,0 +1,19 @@
{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
namespace: {{ .Release.Namespace }}
data:
{{- if .Values.controller.proxySetHeaders }}
{{ toYaml .Values.controller.proxySetHeaders | indent 2 }}
{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }}
{{ toYaml .Values.controller.headers | indent 2 }}
{{- end }}
{{- end }}
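This template honors both the current controller.proxySetHeaders key and the deprecated controller.headers key that NOTES.txt warns about above. A minimal sketch with a hypothetical header value:

    controller:
      proxySetHeaders:
        X-Forwarded-Proto: "https"

    # produces in the -custom-proxy-headers ConfigMap:
    data:
      X-Forwarded-Proto: "https"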

View File

@@ -0,0 +1,17 @@
{{- if .Values.tcp -}}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.controller.tcp.annotations }}
annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.fullname" . }}-tcp
namespace: {{ .Release.Namespace }}
data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }}
{{- end }}
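Entries under .Values.tcp follow the upstream ingress-nginx convention of mapping an exposed port to "namespace/service:port"; the resulting ConfigMap is wired to the controller through the --tcp-services-configmap flag built in _params.tpl above. A sketch with a hypothetical service:

    tcp:
      "9000": "default/example-service:9000"

    # produces:
    data:
      "9000": "default/example-service:9000"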

Some files were not shown because too many files have changed in this diff.