Argo Workflows 재구성

This commit is contained in:
dsk-minchulahn
2024-01-04 14:17:00 +09:00
parent 7659dc51cb
commit c7a1a1f0bd
39 changed files with 177 additions and 1578 deletions

View File

@@ -1,12 +1,12 @@
annotations:
artifacthub.io/changes: |
- kind: fixed
description: Add HA docs and example
- kind: added
description: Add instruction for SSO
artifacthub.io/signKey: |
fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252
url: https://argoproj.github.io/argo-helm/pgp_keys.asc
apiVersion: v2
appVersion: v3.4.11
appVersion: v3.5.2
description: A Helm chart for Argo Workflows
home: https://github.com/argoproj/argo-helm
icon: https://argoproj.github.io/argo-workflows/assets/logo.png
@@ -17,4 +17,4 @@ name: argo-workflows
sources:
- https://github.com/argoproj/argo-workflows
type: application
version: 0.33.3
version: 0.40.3

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: aws-creds
type: Opaque
data:
AWS_REGION: YXAtbm9ydGhlYXN0LTI=
AWS_ACCESS_KEY_ID: QUtJQVhNVlZGM1RBVEhaTFlFSE4=
AWS_SECRET_ACCESS_KEY: MEQ4WGdmK2gzU3hKQmhBUkdpbXhETUJJbTRMUFNmQmswNlRyaEkxSA==
KMS_ARN: YXJuOmF3czprbXM6YXAtbm9ydGhlYXN0LTI6NTA4MjU5ODUxNDU3OmtleS9hNDhiMWU4OC1hOWJiLTRkODYtYTQ4MS1lZjU0ZTJmNDA0NTI=

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: git-credentials
type: Opaque
data:
username: amFlaGVlLWp1bmc=
password: Z2hwX0FBRnJ3eGI2ZDZvYXFGdzJaRTdhUmlPUmpwSzlVcjNuNkc3bA==

View File

@@ -1,18 +0,0 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMrRENDQWVDZ0F3SUJBZ0lNRnlXQnVlNTZlNTJFd2xzNU1BMEdDU3FHU0liM0RRRUJDd1VBTUJneEZqQVUKQmdOVkJBTVREV3QxWW1WeWJtVjBaWE10WTJFd0hoY05Nakl4TVRBMk1EUXpNVEV5V2hjTk16SXhNVEExTURRegpNVEV5V2pBWU1SWXdGQVlEVlFRREV3MXJkV0psY201bGRHVnpMV05oTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBUThBTUlJQkNnS0NBUUVBMElSdldJcVV3WnR4WlZhS2YyMHlFWHNyV2NWNzRzUFpJa0pwM0hqV2JMU3kKYlFkL0xiOVdzMlRRN2FNT1J4U2szTy8yVnJUU2EyWlVubDJyQnN4QThHWENnZDhENm9mcHBnN0FxTWtVQ3REOApacFBNMkFOK2UzWmlUaGlCUHA0aHRmcG4xWEEreEFVTDNsY2RqZHBJVi9vOURIR0J5YTZPSUZIUk5BblRPSWllCnpmZXg3dXZBbmZYa2xudyt6Z2VKTmE3anFhd3RSNFpVTE5Zay9MaU52MlJuVzRYQ1lSQ2ZIMWhLNlp6UG5JYk8KSjhLQzFlbnB2NFFRNXBUYU8yQ0M2RjhGSVRFcHVwNUlKWENEUllHa29Hei9Tbzc0MUdXZ0FmelhRT3hHTS93Wgp4Y2poQ3NuN2psZGRwcXQ3VWprMFV6NTVycXRHZGo5Y3JZUWRtRXlkelFJREFRQUJvMEl3UURBT0JnTlZIUThCCkFmOEVCQU1DQVFZd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVN2SkxFTGhUcEpVd3A5UmwKYytQckpvYzVzZjB3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUlwRnBWVzBNd1JGdnRTall2ZldwdGdPMHdGUApRL1BGcTl3c0w4YzlEYjdadm04UXI0aHZVV0NRbnJjLzhzeWNCNTJoREhqbVJvdzIrOTdyT0JKaTRjOWRhMllKCkhuRVFBZEpFWVlPRjVJZ0Z0T0JVcUpjbmdxbEQzMEQ4cjFncVI4RDkxbEpiVXVzUnFWc2pSUXNvWFNGelZwSVYKTVBsQ3IrZloxVEpXSmN6RVVoRDkrNnFZUnpmUlBSU3VKdmNmUzFtbWlBaUdOaEwwZXBWN2s3eW4yUHlJTEVWQwpQcDFMcStNUGJJdnNRcnZHQnZ2aURpUCs1SXJlVUYvZzNGU1JqQVNIaGxvZ0NwZFFjZkVlQnVXNkxYdksyNUJjCk9PbUVUcXpaMm40ZTlQMUg4M25nSHVDOEdwaU1iY1VaaFlLQ0hYSXU0MEdrdkl5R0wyZTF2Vks4a2o4PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
server: https://api.k8s-prod.datasaker.io
name: dsk-prod-cluster
contexts:
- context:
cluster: dsk-prod-cluster
user: monitor-ro
name: dsk-prod-cluster
current-context: dsk-prod-cluster
kind: Config
preferences: {}
users:
- name: monitor-ro
user:
token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ik9LMDAzSmljd1NydVNMYkc1cVg4Sk1qYW9QNDMxVEp2bmlGQ2FMaFQtQVUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6Im1vbml0b3Itcm8tc2VjcmV0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Im1vbml0b3Itcm8iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI1ZTliZDYwNy0yZTllLTQwNWUtOGM1My00ZWU4Njg0ZTUzZjIiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDptb25pdG9yLXJvIn0.otQZico3ewb89vgcZRRxP3MfWS-P25rhqahisz80Kku8XrbdiJZqff3lZxU1Snb9w1cTgvszEAB7yBDQqPWiFVKSZl8xedWJaraOHR7UvvnoclTeEtyrUNcMbG4bbljPk0sb-H-H3FR_xw6oJvRsWTSEq0z2ztMKp0R8ky4gQOl8hdFQ0eICGs1I6Gy69z9PuaVjhBNO3EIvMPJQcFtbgiXYBpAqKefRFnrJ5gW00cZhb0Jcc5OQkoTByGqMpsdCB4JpYmjA0qSo79G9JkN2iN2VaqvxUjc1MKIzpYr_2bN9PFIakue8qILVUaHQyUcSxPJN1Y0rwNVZ_xiyedhxLA

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: my-kubeconfig
type: Opaque
data:
config: YXBpVmVyc2lvbjogdjEKY2x1c3RlcnM6Ci0gY2x1c3RlcjoKICAgIGNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhOiBMUzB0TFMxQ1JVZEpUaUJEUlZKVVNVWkpRMEZVUlMwdExTMHRDazFKU1VNclJFTkRRV1ZEWjBGM1NVSkJaMGxOUm5sWFFuVmxOVFpsTlRKRmQyeHpOVTFCTUVkRFUzRkhVMGxpTTBSUlJVSkRkMVZCVFVKbmVFWnFRVlVLUW1kT1ZrSkJUVlJFVjNReFdXMVdlV0p0VmpCYVdFMTBXVEpGZDBob1kwNU5ha2w0VFZSQk1rMUVVWHBOVkVWNVYyaGpUazE2U1hoTlZFRXhUVVJSZWdwTlZFVjVWMnBCV1UxU1dYZEdRVmxFVmxGUlJFVjNNWEprVjBwc1kyMDFiR1JIVm5wTVYwNW9UVWxKUWtscVFVNUNaMnR4YUd0cFJ6bDNNRUpCVVVWR0NrRkJUME5CVVRoQlRVbEpRa05uUzBOQlVVVkJNRWxTZGxkSmNWVjNXblI0V2xaaFMyWXlNSGxGV0hOeVYyTldOelJ6VUZwSmEwcHdNMGhxVjJKTVUza0tZbEZrTDB4aU9WZHpNbFJSTjJGTlQxSjRVMnN6VHk4eVZuSlVVMkV5V2xWdWJESnlRbk40UVRoSFdFTm5aRGhFTm05bWNIQm5OMEZ4VFd0VlEzUkVPQXBhY0ZCTk1rRk9LMlV6V21sVWFHbENVSEEwYUhSbWNHNHhXRUVyZUVGVlRETnNZMlJxWkhCSlZpOXZPVVJJUjBKNVlUWlBTVVpJVWs1QmJsUlBTV2xsQ25wbVpYZzNkWFpCYm1aWWEyeHVkeXQ2WjJWS1RtRTNhbkZoZDNSU05GcFZURTVaYXk5TWFVNTJNbEp1VnpSWVExbFNRMlpJTVdoTE5scDZVRzVKWWs4S1NqaExRekZsYm5CMk5GRlJOWEJVWVU4eVEwTTJSamhHU1ZSRmNIVndOVWxLV0VORVVsbEhhMjlIZWk5VGJ6YzBNVWRYWjBGbWVsaFJUM2hIVFM5M1dncDRZMnBvUTNOdU4ycHNaR1J3Y1hRM1ZXcHJNRlY2TlRWeWNYUkhaR281WTNKWlVXUnRSWGxrZWxGSlJFRlJRVUp2TUVsM1VVUkJUMEpuVGxaSVVUaENDa0ZtT0VWQ1FVMURRVkZaZDBSM1dVUldVakJVUVZGSUwwSkJWWGRCZDBWQ0wzcEJaRUpuVGxaSVVUUkZSbWRSVlZOMlNreEZUR2hVY0VwVmQzQTVVbXdLWXl0UWNrcHZZelZ6WmpCM1JGRlpTa3R2V2tsb2RtTk9RVkZGVEVKUlFVUm5aMFZDUVVsd1JuQldWekJOZDFKR2RuUlRhbGwyWmxkd2RHZFBNSGRHVUFwUkwxQkdjVGwzYzB3NFl6bEVZamRhZG0wNFVYSTBhSFpWVjBOUmJuSmpMemh6ZVdOQ05USm9SRWhxYlZKdmR6SXJPVGR5VDBKS2FUUmpPV1JoTWxsS0NraHVSVkZCWkVwRldWbFBSalZKWjBaMFQwSlZjVXBqYm1keGJFUXpNRVE0Y2pGbmNWSTRSRGt4YkVwaVZYVnpVbkZXYzJwU1VYTnZXRk5HZWxad1NWWUtUVkJzUTNJclpsb3hWRXBYU21ONlJWVm9SRGtyTm5GWlVucG1VbEJTVTNWS2RtTm1VekZ0YldsQmFVZE9hRXd3WlhCV04yczNlVzR5VUhsSlRFVldRd3BRY0RGTWNTdE5VR0pKZG5OUmNuWkhRbloyYVVScFVDczFTWEpsVlVZdlp6TkdVMUpxUVZOSWFHeHZaME53WkZGalprVmxRblZYTmt4WWRrc3lOVUpqQ2s5UGJVVlVjWHBhTW00MFpUbFFNVWc0TTI1blNIVkRPRWR3YVUxaVkxVmFhRmxMUTBoWVNYVTBNRWRyZGtsNVIwd3laVEYyVmtzNGEybzRQUW90TFMwdExV
Vk9SQ0JEUlZKVVNVWkpRMEZVUlMwdExTMHRDZz09CiAgICBzZXJ2ZXI6IGh0dHBzOi8vYXBpLms4cy1wcm9kLmRhdGFzYWtlci5pbwogIG5hbWU6IGRzay1wcm9kLWNsdXN0ZXIKY29udGV4dHM6Ci0gY29udGV4dDoKICAgIGNsdXN0ZXI6IGRzay1wcm9kLWNsdXN0ZXIKICAgIHVzZXI6IG1vbml0b3Itcm8KICBuYW1lOiBkc2stcHJvZC1jbHVzdGVyCmN1cnJlbnQtY29udGV4dDogZHNrLXByb2QtY2x1c3RlcgpraW5kOiBDb25maWcKcHJlZmVyZW5jZXM6IHt9CnVzZXJzOgotIG5hbWU6IG1vbml0b3Itcm8KICB1c2VyOgogICAgdG9rZW46IGV5SmhiR2NpT2lKU1V6STFOaUlzSW10cFpDSTZJazlMTURBelNtbGpkMU55ZFZOTVlrYzFjVmc0U2sxcVlXOVFORE14VkVwMmJtbEdRMkZNYUZRdFFWVWlmUS5leUpwYzNNaU9pSnJkV0psY201bGRHVnpMM05sY25acFkyVmhZMk52ZFc1MElpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJhV05sWVdOamIzVnVkQzl1WVcxbGMzQmhZMlVpT2lKa1pXWmhkV3gwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXpaV055WlhRdWJtRnRaU0k2SW0xdmJtbDBiM0l0Y204dGMyVmpjbVYwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXpaWEoyYVdObExXRmpZMjkxYm5RdWJtRnRaU0k2SW0xdmJtbDBiM0l0Y204aUxDSnJkV0psY201bGRHVnpMbWx2TDNObGNuWnBZMlZoWTJOdmRXNTBMM05sY25acFkyVXRZV05qYjNWdWRDNTFhV1FpT2lJMVpUbGlaRFl3TnkweVpUbGxMVFF3TldVdE9HTTFNeTAwWldVNE5qZzBaVFV6WmpJaUxDSnpkV0lpT2lKemVYTjBaVzA2YzJWeWRtbGpaV0ZqWTI5MWJuUTZaR1ZtWVhWc2REcHRiMjVwZEc5eUxYSnZJbjAub3RRWmljbzNld2I4OXZnY1pSUnhQM01mV1MtUDI1cmhxYWhpc3o4MEtrdThYcmJkaUpacWZmM2xaeFUxU25iOXcxY1RndnN6RUFCN3lCRFFxUFdpRlZLU1psOHhlZFdKYXJhT0hSN1V2dm5vY2xUZUV0eXJVTmNNYkc0YmJsalBrMHNiLUgtSDNGUl94dzZvSnZSc1dUU0VxMHoyenRNS3AwUjhreTRnUU9sOGhkRlEwZUlDR3MxSTZHeTY5ejlQdWFWamhCTk8zRUl2TVBKUWNGdGJnaVhZQnBBcUtlZlJGbnJKNWdXMDBjWmhiMEpjYzVPUWtvVEJ5R3FNcHNkQ0I0SnBZbWpBMHFTbzc5RzlKa04yaU4yVmFxdnhVamMxTUtJenBZcl8yYk45UEZJYWt1ZThxSUxWVWFIUXlVY1N4UEpOMVkwcndOVlpfeGl5ZWRoeExBCg==

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: bastion-ssh-key
type: Opaque
data:
ssh-key: LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUJsd0FBQUFkemMyZ3RjbgpOaEFBQUFBd0VBQVFBQUFZRUE0OXhHTFE5OGEweUFSWkNGdFA2ZFFWWDNlbTlvMEVqeVdCZDVGSTVwN0N6VXJJM0wyRDJ5CkR1ancyU1l0TTRFSU1hYWhPU0FUblBJNDFrTm9CWU85Y3UrZ0FWYzF4cVA4SjkrZm9BUE1WRmZPK0oyZ2pDRUJGYlNwRUwKZmlPNDRQN2NpZWNTWFRGdmxqby9lQndwbEEwTzl3RkxzSnN2eHo0dG9odGlWQnEwSHNkbDZnTmJkTFQrU1VBcDFJbDV3TgpTemdvUTNoS3dJYnhjckFoWlI4S3d6MjNoU1dDTC94NlMyRVlpdmY3WTVxOG8razlBQmJvMHlVMHovZUxrMTlxekpPQ0hLCkFFSlRWQ1Q4TGhMSnc3L1hDYkF6RnNDQnNXWmNVbHVVRDJqL0dMVnVBdURLd3RpRGlJemxmUDRLME04SnYyemtLT0YvRG0KM3gwSTRVT3ZZS2pNaElseUpQb3BvQWErakRVaSt3TDVJT1RiV3Y1VXpCNSswTU1EZVVaVGYyRmo4MTkyTmdTSGx1NnV3RwpKcW5BUUVQMCtpTkpYSFN2NHFZbENMMTNCd1hCbzV6TjBncmNQdGpwdVJOMk5JcHMxSkMrZHh4MUsrQzc3eVdmbVF1anZCCnF0YlI5bFZvQmtMK0xMVlpQRVZQWlgxZlZwRTE1eElFRGJNc2MrWHRBQUFGaVBVQThvYjFBUEtHQUFBQUIzTnphQzF5YzIKRUFBQUdCQU9QY1JpMFBmR3RNZ0VXUWhiVCtuVUZWOTNwdmFOQkk4bGdYZVJTT2Fld3MxS3lOeTlnOXNnN284TmttTFRPQgpDREdtb1RrZ0U1enlPTlpEYUFXRHZYTHZvQUZYTmNhai9DZmZuNkFEekZSWHp2aWRvSXdoQVJXMHFSQzM0anVPRCszSW5uCkVsMHhiNVk2UDNnY0taUU5EdmNCUzdDYkw4YytMYUliWWxRYXRCN0haZW9EVzNTMC9rbEFLZFNKZWNEVXM0S0VONFNzQ0cKOFhLd0lXVWZDc005dDRVbGdpLzhla3RoR0lyMysyT2F2S1BwUFFBVzZOTWxOTS8zaTVOZmFzeVRnaHlnQkNVMVFrL0M0Uwp5Y08vMXdtd014YkFnYkZtWEZKYmxBOW8veGkxYmdMZ3lzTFlnNGlNNVh6K0N0RFBDYjlzNUNqaGZ3NXQ4ZENPRkRyMkNvCnpJU0pjaVQ2S2FBR3ZvdzFJdnNDK1NEazIxcitWTXdlZnREREEzbEdVMzloWS9OZmRqWUVoNWJ1cnNCaWFwd0VCRDlQb2oKU1Z4MHIrS21KUWk5ZHdjRndhT2N6ZElLM0Q3WTZia1RkalNLYk5TUXZuY2NkU3ZndSs4bG41a0xvN3dhclcwZlpWYUFaQwovaXkxV1R4RlQyVjlYMWFSTmVjU0JBMnpMSFBsN1FBQUFBTUJBQUVBQUFHQUoveW5BQTlncVQvU251S2U1RVZ1ZVdISnZWCjNCeWhPVEcreDZuaVhqNXNOelN4alROZzZWcWRJdE9oNWQvbFRkaUVFU3VBQ3VFSFBkajVSaXM5MExxUmp1UG0zOGpQQ0kKTnNNaXN5VVhmWkd2UzZmMTNjR0kvRE1wSERyNDQ3U3BqUFFSQWhBK1BDRGw0SWQxNlIyVjUxU3RtYVc5TFFEcW9WdmdZSwpPbkk1TzlHSVRBbnN5YzFkZHFOZzgrQndVbXZCV04wMUZQNVB1SmNiUUJHRXJiUzZvUUE3aXlZUk53cnJ2S1g5RG05L0xXCnVma2JjR0dVMkVFR1JKelBqTHhJbi9xQmdCdzgzT3VJVDVrSXV
xRHhaMEltOHVqS1RxR2UxbG5hakJHMG5yQkNHZEVSczUKUDlGRENWOEY0QkdPZlV5MGNqKyszZjg5U3RoREx2WGFLMzNrY3hpeTI2ZzhaTGQvWWZmVjF1SDBnVlFpNUlRUTVsc0hyWAozMkN4NU9YUEJmeldyNVJXcEw3T0VwUXVMcVJxZzdxU3g4N0t1eU4wUG15bnkzZkwrVUZtbjdjYVhneXhrZWliNExiaGVwClVzekRGYWdBbjE4UU1yNzJob1dyczh2ZklTMTg2eW1XYzFreTE0TzNZMWlDQ0s1VE5BQVB3S3VPSURWNllJbnIvdEFBQUEKd0hWMUhiUHZ0S0w3aHgwM2JYL29tMENSSzh3dXI2dGRJMTJRbVNzRkYrQ0JNQ1FmYUlMTDZaYVhQdDkvVWVDYm5QWnlBZQpkL0tqYVM0cWw3OVdlVHhRY2huK1Frd2V6SGJ1RDZ4WjRqUjV2WEIzc2pmMWlGcEVqVm9mdGtMK2VyT0duZjh0YzB6RTBiCllBTmlvUkpBQUVMUjJZMzlrQXJkOGREQWFmbzVYai82Y0dJUWI5ekpEdzZDb2g5SE03WW5maHdqQkpoQVBic01TUjBNUEsKNHpvTUYrenl5QXM3OVQrME1SUFUwVGV4b1RMY25GTGpPM0VHNHdBMEZ5dTBMOGt3QUFBTUVBODFJRmdMeFVZdTRJblVLbApSN0FmOHh2bnZqM1NNcEhNRURXemNCeDhqL3dNZWNoWjBrMVZwMmZCZzdwdjF1Tll1OXNGbzBvekJXdG0xZy9rcjJDYjVSCkZ6Tnh3eHpBUDg5L3RxZVA3NDVIcjBKamQ4L08yb3pCMThYTThYcVh4ZHpSSkg2dUFGUFI2N0Exby95Szk3dDAyVklqYkgKUnVwY2Z4TlpGeFZmbTF5Y1NBd1Z1N1NLK2FlUlBCbndBQk5mL3Y3Wm4rWWl0T0ozRGlHR3hSNWp4bHZtczIvR3pPNjlEZApsbFQwRGFPNkhPbWIyYTkwY0pucithc01xSGk4THZBQUFBd1FEdnZBTHpQVUhSaWY4K0RvY3VjMnUrdjlta2NyMDNhUkkrCkNoNGdXOFpSSTlmbDBKYkJ0akhwWHRVR3VleExOV0NJUlVtd3FSSnlzRFVSN0MrREVFQWlZRU1PdDZBU2JXTkw0UXgxQ3AKZkRvbTNFdGFPbTUzUTR6QURTdC81NVJtbEUrZWpad09aZEw3bWdKb1l3QmVPRUQvaHJxSXYvOEtmOU0yWW9yQTl1TWp5ZwpQNEpsb1R5V2I0WlhCOEgrUDNrbS9aWWt5ZHpBTUhaTTN4RXZVZXFkNzlIWHlHVW5BRmdSeVdQMUJIZVBmcSt6Z1lFWnVzClBqeU1IZk9manF0T01BQUFBTmNtOXZkRUIwYlhBdGRHVnpkQUVDQXdRRkJnPT0KLS0tLS1FTkQgT1BFTlNTSCBQUklWQVRFIEtFWS0tLS0tCg==

View File

@@ -1,71 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
name: db-backup-cron
spec:
schedule: "0 5 * * *"
timezone: "Asia/Seoul"
workflowSpec:
entrypoint: db-backup
templates:
- name: db-backup
container:
image: nexus2.exem-oss.org/dsk-middleware-backup:v2.0
env:
- name: sh_debug
value: ''
- name: MASTER_HOST
valueFrom:
secretKeyRef:
name: db-creds
key: master_host
- name: MONGO_MANIFEST_PORT
value: '30111'
- name: MONGO_DSK_PORT
value: '30112'
- name: MONGO_USERNAME
valueFrom:
secretKeyRef:
name: db-creds
key: mongo_username
- name: MONGO_PASSWORD
valueFrom:
secretKeyRef:
name: db-creds
key: mongo_password
- name: POSTGRES_PORT
value: '32098'
- name: POSTGRES_USERNAME
valueFrom:
secretKeyRef:
name: db-creds
key: postgres_username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: db-creds
key: postgres_password
- name: S3_BUCKET
value: 'dsk-middleware-backup'
- name: sse
value: 'aws:kms'
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_REGION
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_SECRET_ACCESS_KEY
- name: kms_arn
valueFrom:
secretKeyRef:
name: aws-creds
key: KMS_ARN

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: db-creds
type: Opaque
data:
master_host: MTcyLjI0LjEyLjExMQ==
mongo_username: cm9vdA==
mongo_password: bW9uZ28jcGFzcw==
postgres_username: cG9zdGdyZXM=
postgres_password: cm9vdA==

View File

@@ -1,92 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
name: db-backup-cron
spec:
schedule: "0 5 * * *"
timezone: "Asia/Seoul"
workflowSpec:
entrypoint: db-backup
templates:
- name: db-backup
container:
image: nexus2.exem-oss.org/dsk-middleware-backup:v3.0
command: ["sh", "-c"]
args:
- |
# SSH 키를 파일로 저장
echo -n "$SSH_KEY" > /tmp/id_rsa
chmod 600 /tmp/id_rsa
cat /tmp/id_rsa
# SSH 터널링 설정
ssh -i /tmp/id_rsa -fN -L $MONGO_MANIFEST_PORT:$MASTER_HOST:$MONGO_MANIFEST_PORT dev2@bastion.kr.datasaker.io -o StrictHostKeyChecking=no &
ssh -i /tmp/id_rsa -fN -L $MONGO_DSK_PORT:$MASTER_HOST:$MONGO_DSK_PORT dev2@bastion.kr.datasaker.io -o StrictHostKeyChecking=no &
ssh -i /tmp/id_rsa -fN -L $POSTGRES_PORT:$MASTER_HOST:$POSTGRES_PORT dev2@bastion.kr.datasaker.io -o StrictHostKeyChecking=no &
chmod +x /backup.sh
/backup.sh
env:
- name: SSH_KEY
valueFrom:
secretKeyRef:
name: bastion-ssh-key
key: ssh-key
- name: sh_debug
value: ''
- name: MASTER_HOST
valueFrom:
secretKeyRef:
name: db-creds
key: master_host
- name: MONGO_MANIFEST_PORT
value: '30111'
- name: MONGO_DSK_PORT
value: '30112'
- name: MONGO_USERNAME
valueFrom:
secretKeyRef:
name: db-creds
key: mongo_username
- name: MONGO_PASSWORD
valueFrom:
secretKeyRef:
name: db-creds
key: mongo_password
- name: POSTGRES_PORT
value: '32098'
- name: POSTGRES_USERNAME
valueFrom:
secretKeyRef:
name: db-creds
key: postgres_username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: db-creds
key: postgres_password
- name: S3_BUCKET
value: 'dsk-middleware-backup'
- name: sse
value: 'aws:kms'
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_REGION
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_SECRET_ACCESS_KEY
- name: kms_arn
valueFrom:
secretKeyRef:
name: aws-creds
key: KMS_ARN
restartPolicy: OnFailure

View File

@@ -1,190 +0,0 @@
#!/bin/bash
#------------------------------------------------------------------------------------------------------
__init (){
datetime=`date "+%Y.%m.%d %H:%M:%S"`
echo -e "*current time : ${datetime}\n" > ${file}
cat ${origin} >> ${file}
}
#------------------------------------------------------------------------------------------------------
__append (){
line_count=`cat ${exec_log} | grep -v -- -- | egrep -v '(name|ri_count)' | wc -l`
echo -e "\n${title} [${line_count}]\n" >> ${file}
cat ${exec_log} >> ${file}
}
#------------------------------------------------------------------------------------------------------
__query_exec (){
steampipe query "${1}" > ${exec_log}
__log_sed
}
#------------------------------------------------------------------------------------------------------
__log_sed (){
sed -i 's/+/|/g' ${exec_log}
sed -i "s/node-role.kubernetes.io\///g" ${exec_log}
sed -i '1d;$d' ${exec_log}
}
#------------------------------------------------------------------------------------------------------
node_query="""
SELECT
name,
annotations ->> 'projectcalico.org/IPv4Address' AS IP,
COALESCE(taints -> 0 ->> 'key', '-') AS Taints_key,
COALESCE(tags ->> 'kops.k8s.io/instancegroup', '-') AS Instance_group,
capacity ->> 'cpu' AS CPU,
CEIL((CAST(regexp_replace(capacity ->> 'memory', 'Ki', '') AS FLOAT) / 1024 / 1024)) AS Memory,
tags ->> 'topology.kubernetes.io/zone' AS Zone,
tags ->> 'beta.kubernetes.io/instance-type' AS Instance_type,
node_info ->> 'osImage' AS OS,
node_info ->> 'kubeletVersion' AS K8S_ver,
node_info ->> 'containerRuntimeVersion' AS Runtime_ver
FROM
kubernetes_node
ORDER BY
Taints_key
"""
resources_query="""
(SELECT
'sts' as kind,
name,
available_replicas as count,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
namespace
FROM
kubernetes_stateful_set
WHERE
name not like 'rel-%')
union
(SELECT
'deploy' as kind,
name,
available_replicas as count,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
namespace
FROM
kubernetes_deployment
WHERE
name not like 'rel-%')
union
(SELECT
'ds' as kind,
name,
number_available as count,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
namespace
FROM
kubernetes_daemonset
WHERE
name not like 'rel-%')
order by kind
"""
service_query="""
SELECT
name,
namespace,
type,
lower(p ->> 'nodePort') as Node_Port,
age(current_timestamp, creation_timestamp)
FROM
kubernetes_service,
jsonb_array_elements(ports) as p
WHERE
type='NodePort'
ORDER BY
Node_Port
"""
#name not like '%rel-%'
aws_ri_query="""
SELECT
COALESCE(a.availability_zone, b.availability_zone, '-') AS availability_zone,
COALESCE(a.instance_type, b.instance_type, c.instance_type, '-') AS instance_type,
COALESCE(c.cpu, 0) AS cpu,
COALESCE(c.memory, 0) AS memory,
COALESCE(a.ri_count, 0) AS ri_count,
COALESCE(b.ec2_count, 0) AS ec2_count,
COALESCE(b.ec2_count, 0) - COALESCE(a.ri_count, 0) AS result
FROM
(SELECT
availability_zone,
instance_type,
SUM(instance_count) AS ri_count
FROM
aws_ec2_reserved_instance
WHERE
instance_state='active'
GROUP BY
availability_zone,
instance_type
) a
FULL OUTER JOIN
(SELECT
placement_availability_zone AS availability_zone,
instance_type,
COUNT(*) AS ec2_count
FROM
aws_ec2_instance
WHERE
instance_state='running' AND
instance_lifecycle!='spot'
GROUP BY
availability_zone,
instance_type
) b
ON
a.availability_zone = b.availability_zone AND
a.instance_type = b.instance_type
INNER JOIN
(SELECT
instance_type,
(CAST(memory_info ->> 'SizeInMiB' AS FLOAT) / 1024) AS memory,
(CAST(v_cpu_info ->> 'DefaultCores' AS FLOAT) * 2) AS cpu
FROM
aws_ec2_instance_type
WHERE
instance_type in (SELECT instance_type FROM aws_ec2_instance WHERE instance_state='running')
GROUP BY
instance_type, memory, cpu
) c
ON
COALESCE(a.instance_type, b.instance_type, '-') = c.instance_type
ORDER BY availability_zone
"""
#instance_type in (SELECT instance_type FROM aws_ec2_instance WHERE instance_state='running' AND instance_lifecycle!='spot')
#------------------------------------------------------------------------------------------------------
origin="org_README.md"
exec_log="/shared-data/query.log"
file="/shared-data/README2.md"
#------------------------------------------------------------------------------------------------------
__init
title="## 노드 목록"
__query_exec "${node_query}"
__append
title="## 리소스 목록"
__query_exec "${resources_query}"
__append
title="## 서비스 목록 (NodePort)"
__query_exec "${service_query}"
__append
title="## 예약 인스턴스 사용 내역"
__query_exec "${aws_ri_query}"
__append
#------------------------------------------------------------------------------------------------------
rm ${exec_log}

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: steampipe
spec:
containers:
- name: steampipe
image: ghcr.io/turbot/steampipe:latest
command: ["/bin/bash"]
args: ["-c", "while true; do sleep 10; done"]

View File

@@ -1,112 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: scripts-bash-
spec:
entrypoint: bash-script-example
volumes:
- name: script-volume
configMap:
name: steampipe-script
- name: kubeconfig
secret:
secretName: my-kubeconfig
volumeClaimTemplates:
- metadata:
name: shared-data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
templates:
- name: bash-script-example
dag:
tasks:
- name: steampipe
template: steampipe
- name: git
template: git
dependencies:
- steampipe
- name: steampipe
script:
image: ghcr.io/turbot/steampipe:latest
command: [bash]
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: script-volume
mountPath: /scripts
- name: kubeconfig
mountPath: /kubeconfig
env:
- name: AWS_REGION
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_REGION
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_SECRET_ACCESS_KEY
- name: KMS_ARN
valueFrom:
secretKeyRef:
name: aws-creds
key: KMS_ARN
- name: KUBECONFIG
value: /kubeconfig/config
source: |
# 쿼리 결과를 공유 볼륨에 저장
steampipe plugin install aws
steampipe plugin install kubernetes
sleep 1
steampipe service restart --force
sleep 1
cd /shared-data/
sleep 1
bash /scripts/script.sh
- name: git
script:
image: alpine/git:latest
command: [sh]
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: script-volume
mountPath: /scripts
env:
- name: GIT_USERNAME
valueFrom:
secretKeyRef:
name: git-credentials
key: username
- name: GIT_PASSWORD
valueFrom:
secretKeyRef:
name: git-credentials
key: password
source: |
git config --global credential.helper 'store --file /tmp/credentials'
echo "https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com" > /tmp/credentials
git config --global user.email "havelight@ex-em.com"
git config --global user.name "jaehee-jung"
cd /shared-data # 경로 내의 파일 목록 확인
git clone https://github.com/CloudMOA/dsk-iac.git
# 공유 볼륨에서 쿼리 결과 읽기
cp README2.md dsk-iac/README.md
# 커밋 및 푸시 작업 수행
cd dsk-iac
git add README.md
git commit -m "Update README with query result"
git push

View File

@@ -1,143 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
name: steampipe-aws-report
spec:
schedule: "0 8 * * *"
timezone: "Asia/Seoul"
workflowSpec:
entrypoint: bash-script-example
volumes:
- name: script-volume
configMap:
name: steampipe-script
- name: kubeconfig
secret:
secretName: my-kubeconfig
volumeClaimTemplates:
- metadata:
name: shared-data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
templates:
- name: bash-script-example
dag:
tasks:
- name: git-steampipe
template: git-steampipe
- name: steampipe
template: steampipe
dependencies:
- git-steampipe
- name: git-argo-workflows
template: git-argo-workflows
dependencies:
- steampipe
- name: steampipe
script:
image: ghcr.io/turbot/steampipe:latest
command: [bash]
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: script-volume
mountPath: /scripts
- name: kubeconfig
mountPath: /kubeconfig
env:
- name: AWS_REGION
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_REGION
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-creds
key: AWS_SECRET_ACCESS_KEY
- name: KMS_ARN
valueFrom:
secretKeyRef:
name: aws-creds
key: KMS_ARN
- name: KUBECONFIG
value: /kubeconfig/config
source: |
# 쿼리 결과를 공유 볼륨에 저장
steampipe plugin install aws
sleep 3
steampipe service restart --force
sleep 1
cd /shared-data/steampipe-mod-aws-compliance
steampipe check benchmark.foundational_security --output=md > ../README.md
sleep 1
- name: git-steampipe
script:
image: alpine/git:latest
command: [sh]
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: script-volume
mountPath: /scripts
env:
- name: GIT_USERNAME
valueFrom:
secretKeyRef:
name: git-credentials
key: username
- name: GIT_PASSWORD
valueFrom:
secretKeyRef:
name: git-credentials
key: password
source: |
cd /shared-data/
git clone https://github.com/turbot/steampipe-mod-aws-compliance.git
- name: git-argo-workflows
script:
image: alpine/git:latest
command: [sh]
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: script-volume
mountPath: /scripts
env:
- name: GIT_USERNAME
valueFrom:
secretKeyRef:
name: git-credentials
key: username
- name: GIT_PASSWORD
valueFrom:
secretKeyRef:
name: git-credentials
key: password
source: |
cd /shared-data/
git config --global credential.helper 'store --file /tmp/credentials'
echo "https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com" > /tmp/credentials
git config --global user.email "havelight@ex-em.com"
git config --global user.name "jaehee-jung"
git clone https://github.com/CloudMOA/argo-workflows.git
# 공유 볼륨에서 쿼리 결과 읽기
cp README.md argo-workflows/README.md
# 커밋 및 푸시 작업 수행
cd argo-workflows
git add README.md
git commit -m "Update README with query result"
git push

View File

@@ -1,200 +0,0 @@
kind: ConfigMap
metadata:
annotations:
name: steampipe-script
namespace: argo-workflows
apiVersion: v1
data:
script.sh: |
#!/bin/bash
#------------------------------------------------------------------------------------------------------
__init (){
touch ${origin}
touch ${exec_log}
datetime=`TZ='Asia/Seoul' date "+%Y.%m.%d %H:%M:%S"`
echo -e "*update time : ${datetime}\n" > ${file}
cat ${origin} >> ${file}
}
#------------------------------------------------------------------------------------------------------
__append (){
line_count=`cat ${exec_log} | grep -v -- -- | egrep -v '(name|ri_count)' | wc -l`
echo -e "\n${title} [${line_count}]\n" >> ${file}
cat ${exec_log} >> ${file}
}
#------------------------------------------------------------------------------------------------------
__query_exec (){
steampipe query "${1}" > ${exec_log}
__log_sed
}
#------------------------------------------------------------------------------------------------------
__log_sed (){
sed -i 's/+/|/g' ${exec_log}
sed -i "s/node-role.kubernetes.io\///g" ${exec_log}
sed -i '1d;$d' ${exec_log}
}
#------------------------------------------------------------------------------------------------------
node_query="""
SELECT
name,
annotations ->> 'projectcalico.org/IPv4Address' AS IP,
COALESCE(taints -> 0 ->> 'key', '-') AS Taints_key,
COALESCE(tags ->> 'kops.k8s.io/instancegroup', '-') AS Instance_group,
capacity ->> 'cpu' AS CPU,
CEIL((CAST(regexp_replace(capacity ->> 'memory', 'Ki', '') AS FLOAT) / 1024 / 1024)) AS Memory,
tags ->> 'topology.kubernetes.io/zone' AS Zone,
tags ->> 'beta.kubernetes.io/instance-type' AS Instance_type,
node_info ->> 'osImage' AS OS,
node_info ->> 'kubeletVersion' AS K8S_ver,
node_info ->> 'containerRuntimeVersion' AS Runtime_ver
FROM
kubernetes_node
ORDER BY
Taints_key
"""
resources_query="""
(SELECT
'sts' as kind,
name,
available_replicas as count,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
namespace
FROM
kubernetes_stateful_set
WHERE
name not like 'rel-%')
union
(SELECT
'deploy' as kind,
name,
available_replicas as count,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
namespace
FROM
kubernetes_deployment
WHERE
name not like 'rel-%')
union
(SELECT
'ds' as kind,
name,
number_available as count,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
namespace
FROM
kubernetes_daemonset
WHERE
name not like 'rel-%')
order by kind
"""
service_query="""
SELECT
name,
namespace,
type,
lower(p ->> 'nodePort') as Node_Port,
age(current_timestamp, creation_timestamp)
FROM
kubernetes_service,
jsonb_array_elements(ports) as p
WHERE
type='NodePort'
ORDER BY
Node_Port
"""
#name not like '%rel-%'
aws_ri_query="""
SELECT
COALESCE(a.availability_zone, b.availability_zone, '-') AS availability_zone,
COALESCE(a.instance_type, b.instance_type, c.instance_type, '-') AS instance_type,
COALESCE(c.cpu, 0) AS cpu,
COALESCE(c.memory, 0) AS memory,
COALESCE(a.ri_count, 0) AS ri_count,
COALESCE(b.ec2_count, 0) AS ec2_count,
COALESCE(b.ec2_count, 0) - COALESCE(a.ri_count, 0) AS result
FROM
(SELECT
availability_zone,
instance_type,
SUM(instance_count) AS ri_count
FROM
aws_ec2_reserved_instance
WHERE
instance_state='active'
GROUP BY
availability_zone,
instance_type
) a
FULL OUTER JOIN
(SELECT
placement_availability_zone AS availability_zone,
instance_type,
COUNT(*) AS ec2_count
FROM
aws_ec2_instance
WHERE
instance_state='running' AND
instance_lifecycle!='spot'
GROUP BY
availability_zone,
instance_type
) b
ON
a.availability_zone = b.availability_zone AND
a.instance_type = b.instance_type
INNER JOIN
(SELECT
instance_type,
(CAST(memory_info ->> 'SizeInMiB' AS FLOAT) / 1024) AS memory,
(CAST(v_cpu_info ->> 'DefaultCores' AS FLOAT) * 2) AS cpu
FROM
aws_ec2_instance_type
WHERE
instance_type in (SELECT instance_type FROM aws_ec2_instance WHERE instance_state='running')
GROUP BY
instance_type, memory, cpu
) c
ON
COALESCE(a.instance_type, b.instance_type, '-') = c.instance_type
ORDER BY availability_zone
"""
#instance_type in (SELECT instance_type FROM aws_ec2_instance WHERE instance_state='running' AND instance_lifecycle!='spot')
#------------------------------------------------------------------------------------------------------
origin="/shared-data/org_README.md"
exec_log="/shared-data/query.log"
file="/shared-data/README2.md"
#------------------------------------------------------------------------------------------------------
__init
title="## 노드 목록"
__query_exec "${node_query}"
__append
title="## 리소스 목록"
__query_exec "${resources_query}"
__append
title="## 서비스 목록 (NodePort)"
__query_exec "${service_query}"
__append
title="## 예약 인스턴스 사용 내역"
__query_exec "${aws_ri_query}"
__append
#------------------------------------------------------------------------------------------------------
rm ${exec_log}

View File

@@ -1,11 +0,0 @@
---
# Standalone debug Pod that keeps a Steampipe container alive so it can be
# used interactively via `kubectl exec`.
apiVersion: v1
kind: Pod
metadata:
  name: steampipe
spec:
  containers:
    - name: steampipe
      image: ghcr.io/turbot/steampipe:latest
      # Idle loop: the container has no workload of its own; it just stays
      # Running until the Pod is deleted.
      command: ["/bin/bash"]
      args: ["-c", "while true; do sleep 10; done"]

View File

@@ -1,112 +0,0 @@
---
# One-shot Argo Workflow: runs Steampipe queries against AWS and Kubernetes
# (via /scripts/script.sh from the steampipe-script ConfigMap), writes the
# report to a shared PVC, then commits and pushes it to a Git repository.
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: scripts-bash-
spec:
  entrypoint: bash-script-example
  volumes:
    # Reporting script, mounted read-only from a ConfigMap.
    - name: script-volume
      configMap:
        name: steampipe-script
    # Kubeconfig consumed by the Steampipe kubernetes plugin.
    - name: kubeconfig
      secret:
        secretName: my-kubeconfig
  volumeClaimTemplates:
    # Scratch volume shared between the steampipe and git steps.
    - metadata:
        name: shared-data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
  templates:
    - name: bash-script-example
      dag:
        tasks:
          - name: steampipe
            template: steampipe
          # The git step must run after the report has been generated.
          - name: git
            template: git
            dependencies:
              - steampipe
    # Step 1: install plugins, start the Steampipe service, run the report
    # script; output lands in /shared-data.
    - name: steampipe
      script:
        image: ghcr.io/turbot/steampipe:latest
        command: [bash]
        volumeMounts:
          - name: shared-data
            mountPath: /shared-data
          - name: script-volume
            mountPath: /scripts
          - name: kubeconfig
            mountPath: /kubeconfig
        env:
          # AWS credentials and KMS ARN come from the aws-creds Secret.
          - name: AWS_REGION
            valueFrom:
              secretKeyRef:
                name: aws-creds
                key: AWS_REGION
          - name: AWS_ACCESS_KEY_ID
            valueFrom:
              secretKeyRef:
                name: aws-creds
                key: AWS_ACCESS_KEY_ID
          - name: AWS_SECRET_ACCESS_KEY
            valueFrom:
              secretKeyRef:
                name: aws-creds
                key: AWS_SECRET_ACCESS_KEY
          - name: KMS_ARN
            valueFrom:
              secretKeyRef:
                name: aws-creds
                key: KMS_ARN
          # Points the kubernetes plugin at the mounted kubeconfig.
          - name: KUBECONFIG
            value: /kubeconfig/config
        source: |
          # 쿼리 결과를 공유 볼륨에 저장
          steampipe plugin install aws
          steampipe plugin install kubernetes
          sleep 1
          steampipe service restart --force
          sleep 1
          cd /shared-data/
          sleep 1
          bash /scripts/script.sh
    # Step 2: clone the IaC repo, replace README.md with the generated
    # report, commit and push using credentials from the git-credentials
    # Secret.
    - name: git
      script:
        image: alpine/git:latest
        command: [sh]
        volumeMounts:
          - name: shared-data
            mountPath: /shared-data
          - name: script-volume
            mountPath: /scripts
        env:
          - name: GIT_USERNAME
            valueFrom:
              secretKeyRef:
                name: git-credentials
                key: username
          - name: GIT_PASSWORD
            valueFrom:
              secretKeyRef:
                name: git-credentials
                key: password
        source: |
          git config --global credential.helper 'store --file /tmp/credentials'
          echo "https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com" > /tmp/credentials
          git config --global user.email "havelight@ex-em.com"
          git config --global user.name "jaehee-jung"
          cd /shared-data # 경로 내의 파일 목록 확인
          git clone https://github.com/CloudMOA/dsk-iac.git
          # 공유 볼륨에서 쿼리 결과 읽기
          cp README2.md dsk-iac/README.md
          # 커밋 및 푸시 작업 수행
          cd dsk-iac
          git add README.md
          git commit -m "Update README with query result"
          git push

View File

@@ -1,115 +0,0 @@
---
# Scheduled variant of the scripts-bash workflow: every day at 08:00 KST,
# regenerate the Steampipe infrastructure report and push it to Git.
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
  name: steampipe-iac-cron
spec:
  schedule: "0 8 * * *"
  timezone: "Asia/Seoul"
  workflowSpec:
    entrypoint: bash-script-example
    volumes:
      # Reporting script, mounted from the steampipe-script ConfigMap.
      - name: script-volume
        configMap:
          name: steampipe-script
      # Kubeconfig consumed by the Steampipe kubernetes plugin.
      - name: kubeconfig
        secret:
          secretName: my-kubeconfig
    volumeClaimTemplates:
      # Scratch volume shared between the steampipe and git steps.
      - metadata:
          name: shared-data
        spec:
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 1Gi
    templates:
      - name: bash-script-example
        dag:
          tasks:
            - name: steampipe
              template: steampipe
            # The git step must run after the report has been generated.
            - name: git
              template: git
              dependencies:
                - steampipe
      # Step 1: install plugins, restart the Steampipe service, run the
      # report script; output lands in /shared-data.
      - name: steampipe
        script:
          image: ghcr.io/turbot/steampipe:latest
          command: [bash]
          volumeMounts:
            - name: shared-data
              mountPath: /shared-data
            - name: script-volume
              mountPath: /scripts
            - name: kubeconfig
              mountPath: /kubeconfig
          env:
            # AWS credentials and KMS ARN come from the aws-creds Secret.
            - name: AWS_REGION
              valueFrom:
                secretKeyRef:
                  name: aws-creds
                  key: AWS_REGION
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: aws-creds
                  key: AWS_ACCESS_KEY_ID
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: aws-creds
                  key: AWS_SECRET_ACCESS_KEY
            - name: KMS_ARN
              valueFrom:
                secretKeyRef:
                  name: aws-creds
                  key: KMS_ARN
            # Points the kubernetes plugin at the mounted kubeconfig.
            - name: KUBECONFIG
              value: /kubeconfig/config
          source: |
            # 쿼리 결과를 공유 볼륨에 저장
            steampipe plugin install aws
            steampipe plugin install kubernetes
            sleep 3
            steampipe service restart --force
            sleep 1
            cd /shared-data/
            sleep 1
            bash /scripts/script.sh
      # Step 2: clone the IaC repo, replace README.md with the generated
      # report, commit and push using the git-credentials Secret.
      - name: git
        script:
          image: alpine/git:latest
          command: [sh]
          volumeMounts:
            - name: shared-data
              mountPath: /shared-data
            - name: script-volume
              mountPath: /scripts
          env:
            - name: GIT_USERNAME
              valueFrom:
                secretKeyRef:
                  name: git-credentials
                  key: username
            - name: GIT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: git-credentials
                  key: password
          source: |
            git config --global credential.helper 'store --file /tmp/credentials'
            echo "https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com" > /tmp/credentials
            git config --global user.email "havelight@ex-em.com"
            git config --global user.name "jaehee-jung"
            cd /shared-data # 경로 내의 파일 목록 확인
            git clone https://github.com/CloudMOA/dsk-iac.git
            # 공유 볼륨에서 쿼리 결과 읽기
            cp README2.md dsk-iac/README.md
            # 커밋 및 푸시 작업 수행
            cd dsk-iac
            git add README.md
            git commit -m "Update README with query result"
            git push

View File

@@ -1,200 +0,0 @@
---
# ConfigMap carrying the Steampipe reporting script; the steampipe-iac
# workflows mount it at /scripts/script.sh and execute it with bash.
#
# Fix: the original manifest had a bare `annotations:` key under metadata,
# which parses as null (yamllint `empty-values`); it has been removed. Keys
# are also in the conventional apiVersion/kind/metadata order (mapping order
# is not semantic, so behavior is unchanged).
apiVersion: v1
kind: ConfigMap
metadata:
  name: steampipe-script
  namespace: argo-workflows
data:
  script.sh: |
    #!/bin/bash
    #------------------------------------------------------------------------------------------------------
    # Start a fresh report: timestamp header + the static org_README.md body.
    __init (){
      touch ${origin}
      touch ${exec_log}
      datetime=`TZ='Asia/Seoul' date "+%Y.%m.%d %H:%M:%S"`
      echo -e "*update time : ${datetime}\n" > ${file}
      cat ${origin} >> ${file}
    }
    #------------------------------------------------------------------------------------------------------
    # Append the current query log to the report under ${title}, with a row
    # count that excludes table borders and header rows.
    __append (){
      line_count=`cat ${exec_log} | grep -v -- -- | egrep -v '(name|ri_count)' | wc -l`
      echo -e "\n${title} [${line_count}]\n" >> ${file}
      cat ${exec_log} >> ${file}
    }
    #------------------------------------------------------------------------------------------------------
    # Run one Steampipe query and post-process its table output.
    __query_exec (){
      steampipe query "${1}" > ${exec_log}
      __log_sed
    }
    #------------------------------------------------------------------------------------------------------
    # Convert Steampipe's ASCII table to Markdown-ish pipes, shorten node-role
    # labels, and drop the first/last border lines.
    __log_sed (){
      sed -i 's/+/|/g' ${exec_log}
      sed -i "s/node-role.kubernetes.io\///g" ${exec_log}
      sed -i '1d;$d' ${exec_log}
    }
    #------------------------------------------------------------------------------------------------------
    # Node inventory: IP, taints, instance group/type, capacity, versions.
    node_query="""
    SELECT
      name,
      annotations ->> 'projectcalico.org/IPv4Address' AS IP,
      COALESCE(taints -> 0 ->> 'key', '-') AS Taints_key,
      COALESCE(tags ->> 'kops.k8s.io/instancegroup', '-') AS Instance_group,
      capacity ->> 'cpu' AS CPU,
      CEIL((CAST(regexp_replace(capacity ->> 'memory', 'Ki', '') AS FLOAT) / 1024 / 1024)) AS Memory,
      tags ->> 'topology.kubernetes.io/zone' AS Zone,
      tags ->> 'beta.kubernetes.io/instance-type' AS Instance_type,
      node_info ->> 'osImage' AS OS,
      node_info ->> 'kubeletVersion' AS K8S_ver,
      node_info ->> 'containerRuntimeVersion' AS Runtime_ver
    FROM
      kubernetes_node
    ORDER BY
      Taints_key
    """
    # Workload resource requests/limits per container for sts/deploy/ds,
    # excluding release-prefixed ('rel-') workloads.
    resources_query="""
    (SELECT
      'sts' as kind,
      name,
      available_replicas as count,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
      jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
      namespace
    FROM
      kubernetes_stateful_set
    WHERE
      name not like 'rel-%')
    union
    (SELECT
      'deploy' as kind,
      name,
      available_replicas as count,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
      jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
      namespace
    FROM
      kubernetes_deployment
    WHERE
      name not like 'rel-%')
    union
    (SELECT
      'ds' as kind,
      name,
      number_available as count,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'cpu' AS request_cpu,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'requests' ->> 'memory' AS request_mem,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'cpu' AS limit_cpu,
      jsonb_array_elements(template -> 'spec' -> 'containers') -> 'resources' -> 'limits' ->> 'memory' AS limit_mem,
      jsonb_array_elements(template -> 'spec' -> 'containers') ->> 'name' AS c_name,
      namespace
    FROM
      kubernetes_daemonset
    WHERE
      name not like 'rel-%')
    order by kind
    """
    # NodePort services sorted by port.
    service_query="""
    SELECT
      name,
      namespace,
      type,
      lower(p ->> 'nodePort') as Node_Port,
      age(current_timestamp, creation_timestamp)
    FROM
      kubernetes_service,
      jsonb_array_elements(ports) as p
    WHERE
      type='NodePort'
    ORDER BY
      Node_Port
    """
    #name not like '%rel-%'
    # Reserved-instance coverage: active RIs vs running on-demand EC2
    # instances per AZ/instance type, joined with instance-type cpu/memory.
    aws_ri_query="""
    SELECT
      COALESCE(a.availability_zone, b.availability_zone, '-') AS availability_zone,
      COALESCE(a.instance_type, b.instance_type, c.instance_type, '-') AS instance_type,
      COALESCE(c.cpu, 0) AS cpu,
      COALESCE(c.memory, 0) AS memory,
      COALESCE(a.ri_count, 0) AS ri_count,
      COALESCE(b.ec2_count, 0) AS ec2_count,
      COALESCE(b.ec2_count, 0) - COALESCE(a.ri_count, 0) AS result
    FROM
      (SELECT
        availability_zone,
        instance_type,
        SUM(instance_count) AS ri_count
      FROM
        aws_ec2_reserved_instance
      WHERE
        instance_state='active'
      GROUP BY
        availability_zone,
        instance_type
      ) a
    FULL OUTER JOIN
      (SELECT
        placement_availability_zone AS availability_zone,
        instance_type,
        COUNT(*) AS ec2_count
      FROM
        aws_ec2_instance
      WHERE
        instance_state='running' AND
        instance_lifecycle!='spot'
      GROUP BY
        availability_zone,
        instance_type
      ) b
    ON
      a.availability_zone = b.availability_zone AND
      a.instance_type = b.instance_type
    INNER JOIN
      (SELECT
        instance_type,
        (CAST(memory_info ->> 'SizeInMiB' AS FLOAT) / 1024) AS memory,
        (CAST(v_cpu_info ->> 'DefaultCores' AS FLOAT) * 2) AS cpu
      FROM
        aws_ec2_instance_type
      WHERE
        instance_type in (SELECT instance_type FROM aws_ec2_instance WHERE instance_state='running')
      GROUP BY
        instance_type, memory, cpu
      ) c
    ON
      COALESCE(a.instance_type, b.instance_type, '-') = c.instance_type
    ORDER BY availability_zone
    """
    #instance_type in (SELECT instance_type FROM aws_ec2_instance WHERE instance_state='running' AND instance_lifecycle!='spot')
    #------------------------------------------------------------------------------------------------------
    origin="/shared-data/org_README.md"
    exec_log="/shared-data/query.log"
    file="/shared-data/README2.md"
    #------------------------------------------------------------------------------------------------------
    __init
    title="## 노드 목록"
    __query_exec "${node_query}"
    __append
    title="## 리소스 목록"
    __query_exec "${resources_query}"
    __append
    title="## 서비스 목록 (NodePort)"
    __query_exec "${service_query}"
    __append
    title="## 예약 인스턴스 사용 내역"
    __query_exec "${aws_ri_query}"
    __append
    #------------------------------------------------------------------------------------------------------
    rm ${exec_log}

View File

@@ -1,34 +0,0 @@
---
# Minimal two-step example Workflow: a script step produces output on stdout
# and a second step echoes it via the steps result parameter.
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: scripts-bash-
spec:
  entrypoint: bash-script-example
  templates:
    - name: bash-script-example
      steps:
        - - name: generate
            template: steampipe
        - - name: print
            template: print-message
            arguments:
              parameters:
                - name: message
                  # stdout of the generate step, captured by Argo.
                  value: "{{steps.generate.outputs.result}}"
    # Runs the bare `steampipe` command; its stdout becomes the step result.
    - name: steampipe
      script:
        image: ghcr.io/turbot/steampipe:latest
        command: [bash]
        source: |
          steampipe
    # Echoes the message passed in from the previous step.
    - name: print-message
      inputs:
        parameters:
          - name: message
      container:
        image: alpine:latest
        command: [sh, -c]
        args: ["echo result was: {{inputs.parameters.message}}"]

View File

@@ -1,6 +0,0 @@
#!/bin/sh
# Configure git's store credential helper with GitHub credentials.
#
# SECURITY FIX: a GitHub personal access token and username were previously
# hard-coded here and committed to version control. That token must be
# treated as compromised and revoked/rotated. Credentials are now required
# to come from the environment (e.g. a Kubernetes Secret via secretKeyRef).
: "${GIT_USERNAME:?GIT_USERNAME must be set in the environment}"
: "${GIT_PASSWORD:?GIT_PASSWORD must be set in the environment}"
git config --global credential.helper 'store --file /etc/git/credentials'
# Restrict the credentials file to the current user before writing it.
umask 077
echo "https://${GIT_USERNAME}:${GIT_PASSWORD}@github.com" > /etc/git/credentials

View File

@@ -1,49 +0,0 @@
---
# Hourly UI-monitoring CronWorkflow: runs a Playwright check bundled in the
# ui_monitoring image and copies the resulting screenshots to a bastion host
# over scp.
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
  name: ui-monitoring-cron
spec:
  schedule: "0 * * * *"
  timezone: "Asia/Seoul"
  workflowSpec:
    entrypoint: python-example
    volumes:
      # NOTE(review): declared but not mounted by any template below —
      # confirm whether the image reads config some other way or this
      # volume can be removed.
      - name: config-volume
        configMap:
          name: ui-monitoring
    volumeClaimTemplates:
      # Scratch volume where the check writes its screenshots.
      - metadata:
          name: shared-data
        spec:
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 1Gi
    templates:
      - name: python-example
        dag:
          tasks:
            - name: ui-monitoring
              template: ui-monitoring
      - name: ui-monitoring
        container:
          image: nexus2.exem-oss.org/ui_monitoring:v0.6
          command: ["sh", "-c"]
          args:
            - |
              echo "======================================"
              python /tmp/dsk_playwright.py
              echo "======================================"
              echo -n "$SSH_KEY" > /tmp/id_rsa
              chmod 600 /tmp/id_rsa
              scp -i /tmp/id_rsa -o StrictHostKeyChecking=no -rp /shared-data/*_screenshot* root@10.10.43.98:/tmp/
          env:
            # Private key for the bastion, from the bastion-ssh-key Secret.
            - name: SSH_KEY
              valueFrom:
                secretKeyRef:
                  name: bastion-ssh-key
                  key: ssh-key
          volumeMounts:
            - name: shared-data
              mountPath: /shared-data
View File

@@ -1,37 +0,0 @@
---
# Hourly UI-monitoring CronWorkflow (older v0.2 variant): runs the
# ui_monitoring image's default entrypoint with a mounted config.json.
# NOTE(review): shares metadata.name `ui-monitoring-cron` with the v0.6
# manifest in this repo — only one of them can exist per namespace; confirm
# which is current.
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
  name: ui-monitoring-cron
spec:
  schedule: "0 * * * *"
  timezone: "Asia/Seoul"
  workflowSpec:
    entrypoint: python-example
    volumes:
      # Runtime configuration from the ui-monitoring ConfigMap.
      - name: config-volume
        configMap:
          name: ui-monitoring
    volumeClaimTemplates:
      # Scratch volume for monitoring output.
      - metadata:
          name: shared-data
        spec:
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 1Gi
    templates:
      - name: python-example
        dag:
          tasks:
            - name: ui-monitoring
              template: ui-monitoring
      - name: ui-monitoring
        script:
          image: nexus2.exem-oss.org/ui_monitoring:v0.2
          volumeMounts:
            - name: shared-data
              mountPath: /shared-data
            # Mount only the config.json key over the expected path.
            - name: config-volume
              mountPath: /tmp/config.json
              subPath: config.json

View File

@@ -1,62 +0,0 @@
---
# Hourly UI-monitoring test CronWorkflow: runs the monitoring image, then a
# second step copies screenshots and error captures to the bastion via scp.
#
# Fix: the scp step contained a leftover debug line
# `while true; do sleep 1; done` placed BEFORE the scp commands, so the two
# scp calls were unreachable and the pod never completed (blocking workflow
# success and PVC garbage collection). The debug loop has been removed.
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
  name: ui-monitoring-cron-test
spec:
  schedule: "0 * * * *"
  timezone: "Asia/Seoul"
  workflowSpec:
    entrypoint: python-example
    volumes:
      # Runtime configuration from the ui-monitoring ConfigMap.
      - name: config-volume
        configMap:
          name: ui-monitoring
    volumeClaimTemplates:
      # Per-workflow scratch PVC; the name is templated so concurrent runs
      # do not collide.
      - metadata:
          name: shared-data-{{workflow.name}}
        spec:
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 1Gi
    templates:
      - name: python-example
        dag:
          tasks:
            - name: ui-monitoring
              template: ui-monitoring
            # Copy captures only after the monitoring run has finished.
            - name: move-capture
              template: scp
              dependencies:
                - ui-monitoring
      - name: ui-monitoring
        script:
          image: nexus2.exem-oss.org/ui_monitoring:v0.3
          volumeMounts:
            - name: shared-data-{{workflow.name}}
              mountPath: /shared-data
            - name: config-volume
              mountPath: /tmp/config.json
              subPath: config.json
      # Ship screenshots and error captures to the bastion host.
      - name: scp
        container:
          image: nexus2.exem-oss.org/dsk-openssh-client:v1.0
          command: ["sh", "-c"]
          args:
            - |
              echo -n "$SSH_KEY" > /tmp/id_rsa
              chmod 600 /tmp/id_rsa
              scp -i /tmp/id_rsa -o StrictHostKeyChecking=no -rp /shared-data/*_screenshot* root@10.10.43.98:/tmp/
              scp -i /tmp/id_rsa -o StrictHostKeyChecking=no -rp /shared-data/*_error* root@10.10.43.98:/tmp/
          env:
            # Private key for the bastion, from the bastion-ssh-key Secret.
            - name: SSH_KEY
              valueFrom:
                secretKeyRef:
                  name: bastion-ssh-key
                  key: ssh-key
          volumeMounts:
            - name: shared-data-{{workflow.name}}
              mountPath: /shared-data

View File

@@ -1,16 +0,0 @@
---
# Runtime configuration for the UI-monitoring Playwright checks (mounted as
# /tmp/config.json by the ui-monitoring CronWorkflows).
#
# SECURITY NOTE(review): config.json embeds a plaintext login password and a
# Slack webhook URL in a ConfigMap committed to VCS. These should be moved
# to a Secret (or external secret store) and the exposed values rotated.
# (Comments cannot go inside the block scalar — JSON has no comment syntax.)
kind: ConfigMap
metadata:
  # NOTE(review): bare `annotations:` parses as null — consider removing it
  # or writing `annotations: {}`.
  annotations:
  name: ui-monitoring
  namespace: argo-workflows
apiVersion: v1
data:
  config.json: |
    {
    "dsk_url": "https://app.kr.datasaker.io",
    "dsk_username": "support@ex-em.com",
    "dsk_password": "saasadmin12#$",
    "slack_webhook_url" : "https://hooks.slack.com/services/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",
    "timeout": 10000
    }

View File

@@ -56,6 +56,11 @@ spec:
args: [ "hello world" ]
```
### Argo Workflows Server SSO
In order to enable SSO, please configure `.Values.server.sso` and `.Values.server.authMode`.
In order to manage access levels, you can optionally add RBAC to SSO. Please refer to [SSO RBAC] for more details.
## Installing the Chart
To install the chart with the release name `my-release`:
@@ -108,6 +113,7 @@ Fields to note:
|-----|------|---------|-------------|
| apiVersionOverrides.autoscaling | string | `""` | String to override apiVersion of autoscaling rendered by this helm chart |
| apiVersionOverrides.cloudgoogle | string | `""` | String to override apiVersion of GKE resources rendered by this helm chart |
| commonLabels | object | `{}` | Labels to set on all resources |
| crds.annotations | object | `{}` | Annotations to be added to all CRDs |
| crds.install | bool | `true` | Install and upgrade CRDs |
| crds.keep | bool | `true` | Keep CRDs on chart uninstall |
@@ -141,6 +147,9 @@ Fields to note:
| controller.affinity | object | `{}` | Assign custom [affinity] rules |
| controller.clusterWorkflowTemplates.enabled | bool | `true` | Create a ClusterRole and CRB for the controller to access ClusterWorkflowTemplates. |
| controller.columns | list | `[]` | Configure Argo Server to show custom [columns] |
| controller.configMap.create | bool | `true` | Create a ConfigMap for the controller |
| controller.configMap.name | string | `""` | ConfigMap name |
| controller.cronWorkflowWorkers | string | `nil` | Number of cron workflow workers Only valid for 3.5+ |
| controller.deploymentAnnotations | object | `{}` | deploymentAnnotations is an optional map of annotations to be applied to the controller Deployment |
| controller.extraArgs | list | `[]` | Extra arguments to be added to the controller |
| controller.extraContainers | list | `[]` | Extra containers to be added to the controller deployment |
@@ -181,6 +190,7 @@ Fields to note:
| controller.pdb.enabled | bool | `false` | Configure [Pod Disruption Budget] for the controller pods |
| controller.persistence | object | `{}` | enable persistence using postgres |
| controller.podAnnotations | object | `{}` | podAnnotations is an optional map of annotations to be applied to the controller Pods |
| controller.podCleanupWorkers | string | `nil` | Number of pod cleanup workers |
| controller.podGCDeleteDelayDuration | string | `5s` (Argo Workflows default) | The duration in seconds before the pods in the GC queue get deleted. A zero value indicates that the pods will be deleted immediately. |
| controller.podGCGracePeriodSeconds | string | `30` seconds (Kubernetes default) | Specifies the duration in seconds before a terminating pod is forcefully killed. A zero value indicates that the pod will be forcefully terminated immediately. |
| controller.podLabels | object | `{}` | Optional labels to add to the controller pods |
@@ -194,6 +204,7 @@ Fields to note:
| controller.resourceRateLimit | object | `{}` | Globally limits the rate at which pods are created. This is intended to mitigate flooding of the Kubernetes API server by workflows with a large amount of parallel nodes. |
| controller.resources | object | `{}` | Resource limits and requests for the controller |
| controller.retentionPolicy | object | `{}` | Workflow retention by number of workflows |
| controller.revisionHistoryLimit | int | `10` | The number of revisions to keep. |
| controller.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsNonRoot":true}` | the controller container's securityContext |
| controller.serviceAccount.annotations | object | `{}` | Annotations applied to created service account |
| controller.serviceAccount.create | bool | `true` | Create a service account for the controller |
@@ -220,6 +231,7 @@ Fields to note:
| controller.workflowDefaults | object | `{}` | Default values that will apply to all Workflows from this controller, unless overridden on the Workflow-level. Only valid for 2.7+ |
| controller.workflowNamespaces | list | `["default"]` | Specify all namespaces where this workflow controller instance will manage workflows. This controls where the service account and RBAC resources will be created. Only valid when singleNamespace is false. |
| controller.workflowRestrictions | object | `{}` | Restricts the Workflows that the controller will process. Only valid for 2.9+ |
| controller.workflowTTLWorkers | string | `nil` | Number of workflow TTL workers |
| controller.workflowWorkers | string | `nil` | Number of workflow workers |
### Workflow Main Container
@@ -256,6 +268,8 @@ Fields to note:
| server.GKEmanagedCertificate.domains | list | `["argoworkflows.example.com"]` | Domains for the Google Managed Certificate |
| server.GKEmanagedCertificate.enabled | bool | `false` | Enable ManagedCertificate custom resource for Google Kubernetes Engine. |
| server.affinity | object | `{}` | Assign custom [affinity] rules |
| server.authMode | string | `""` | Deprecated; use server.authModes instead. |
| server.authModes | list | `[]` | A list of supported authentication modes. Available values are `server`, `client`, or `sso`. If you provide sso, please configure `.Values.server.sso` as well. |
| server.autoscaling.behavior | object | `{}` | Configures the scaling behavior of the target in both Up and Down directions. This is only available on HPA apiVersion `autoscaling/v2beta2` and newer |
| server.autoscaling.enabled | bool | `false` | Enable Horizontal Pod Autoscaler ([HPA]) for the Argo Server |
| server.autoscaling.maxReplicas | int | `5` | Maximum number of replicas for the Argo Server [HPA] |
@@ -267,10 +281,11 @@ Fields to note:
| server.clusterWorkflowTemplates.enabled | bool | `true` | Create a ClusterRole and CRB for the server to access ClusterWorkflowTemplates. |
| server.deploymentAnnotations | object | `{}` | optional map of annotations to be applied to the ui Deployment |
| server.enabled | bool | `true` | Deploy the Argo Server |
| server.extraArgs | list | `[]` | Extra arguments to provide to the Argo server binary, such as for disabling authentication. |
| server.extraArgs | list | `[]` | Extra arguments to provide to the Argo server binary. |
| server.extraContainers | list | `[]` | Extra containers to be added to the server deployment |
| server.extraEnv | list | `[]` | Extra environment variables to provide to the argo-server container |
| server.extraInitContainers | list | `[]` | Enables init containers to be added to the server deployment |
| server.hostAliases | list | `[]` | Mapping between IP and hostnames that will be injected as entries in the pod's hosts files |
| server.image.registry | string | `"quay.io"` | Registry to use for the server |
| server.image.repository | string | `"argoproj/argocli"` | Repository to use for the server |
| server.image.tag | string | `""` | Image tag for the Argo Workflows server. Defaults to `.Values.images.tag`. |
@@ -298,6 +313,7 @@ Fields to note:
| server.rbac.create | bool | `true` | Adds Role and RoleBinding for the server. |
| server.replicas | int | `1` | The number of server pods to run |
| server.resources | object | `{}` | Resource limits and requests for the server |
| server.revisionHistoryLimit | int | `10` | The number of revisions to keep. |
| server.secure | bool | `false` | Run the argo server in "secure" mode. Configure this value instead of `--secure` in extraArgs. |
| server.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":false,"runAsNonRoot":true}` | Servers container-level security context |
| server.serviceAccount.annotations | object | `{}` | Annotations applied to created service account |
@@ -315,16 +331,18 @@ Fields to note:
| server.sso.clientSecret.key | string | `"client-secret"` | Key of a secret to retrieve the app OIDC client secret |
| server.sso.clientSecret.name | string | `"argo-server-sso"` | Name of a secret to retrieve the app OIDC client secret |
| server.sso.customGroupClaimName | string | `""` | Override claim name for OIDC groups |
| server.sso.enabled | bool | `false` | Create SSO configuration |
| server.sso.enabled | bool | `false` | Create SSO configuration. If you set `true` , please also set `.Values.server.authMode` as `sso`. |
| server.sso.filterGroupsRegex | list | `[]` | Filter the groups returned by the OIDC provider |
| server.sso.insecureSkipVerify | bool | `false` | Skip TLS verification for the HTTP client |
| server.sso.issuer | string | `"https://accounts.google.com"` | The root URL of the OIDC identity provider |
| server.sso.issuerAlias | string | `""` | Alternate root URLs that can be included for some OIDC providers |
| server.sso.rbac.enabled | bool | `true` | Adds ServiceAccount Policy to server (Cluster)Role. |
| server.sso.rbac.secretWhitelist | list | `[]` | Whitelist to allow server to fetch Secrets |
| server.sso.redirectUrl | string | `"https://argo/oauth2/callback"` | |
| server.sso.redirectUrl | string | `""` | |
| server.sso.scopes | list | `[]` | Scopes requested from the SSO ID provider |
| server.sso.sessionExpiry | string | `""` | Define how long your login is valid for (in hours) |
| server.sso.userInfoPath | string | `""` | Specify the user info endpoint that contains the groups claim |
| server.tmpVolume | object | `{"emptyDir":{}}` | Volume to be mounted in Pods for temporary files. |
| server.tolerations | list | `[]` | [Tolerations] for use with node taints |
| server.topologySpreadConstraints | list | `[]` | Assign custom [TopologySpreadConstraints] rules to the argo server |
| server.volumeMounts | list | `[]` | Additional volume mounts to the server main container. |
@@ -374,3 +392,4 @@ Fields to note:
[TopologySpreadConstraints]: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
[values.yaml]: values.yaml
[changelog]: https://artifacthub.io/packages/helm/argo/argo-workflows?modal=changelog
[SSO RBAC]: https://argoproj.github.io/argo-workflows/argo-server-sso/#sso-rbac

View File

@@ -0,0 +1,8 @@
---
# Long-lived ServiceAccount token Secret for the argo-workflows-server
# service account; the kubernetes.io/service-account.name annotation makes
# the token controller populate the token into this Secret.
apiVersion: v1
kind: Secret
metadata:
  name: argo-workflows-server.service-account-token
  namespace: argo-workflows
  annotations:
    kubernetes.io/service-account.name: argo-workflows-server
type: kubernetes.io/service-account-token

View File

@@ -1,38 +1,22 @@
---
# Helm values override for the argo-workflows chart.
#
# Fix: the `server` mapping contained the key `serviceNodePort` twice
# (30800, then 32746). Duplicate mapping keys are invalid per the YAML 1.2
# spec and most parsers silently keep the last value; the duplicate has been
# removed, keeping 32746.
singleNamespace: true
# -- String to partially override the "argo-workflows.fullname" template
nameOverride: argo-workflows
# -- String to fully override the "argo-workflows.fullname" template
fullnameOverride: argo-workflows
workflow:
  serviceAccount:
    create: true
    name: "argo-workflows"
  rbac:
    create: true
controller:
  metricsConfig:
    enabled: true
  logging:
    level: debug
  rbac:
    accessAllSecrets: true
    writeConfigMaps: true
  serviceType: NodePort
  extraEnv:
    - name: ARGO_EXECUTOR_PLUGINS
      value: "true"
mainContainer:
  resources:
    limits:
      cpu: 2
      memory: "1Gi"
    requests:
      memory: "512Mi"
server:
  serviceType: NodePort
  secure: true
  # NOTE(review): chart 0.40.x deprecates server.authMode; consider
  # `server.authModes: [server]` instead of passing it via extraArgs.
  extraArgs:
    - --auth-mode=server
  serviceNodePort: 32746

View File

@@ -1,3 +1,7 @@
{{- if .Values.server.authMode }}
DEPRECATED option server.authMode - Use server.authModes
{{- end }}
1. Get Argo Server external IP/domain by running:
kubectl --namespace {{ .Release.Namespace }} get services -o wide | grep {{ template "argo-workflows.server.fullname" . }}

View File

@@ -79,6 +79,9 @@ helm.sh/chart: {{ include "argo-workflows.chart" .context }}
{{ include "argo-workflows.selectorLabels" (dict "context" .context "component" .component "name" .name) }}
app.kubernetes.io/managed-by: {{ .context.Release.Service }}
app.kubernetes.io/part-of: argo-workflows
{{- with .context.Values.commonLabels }}
{{ toYaml .}}
{{- end }}
{{- end }}
{{/*
@@ -94,6 +97,13 @@ app.kubernetes.io/component: {{ .component }}
{{- end }}
{{- end }}
{{/*
Create the name of the controller configMap
*/}}
{{- define "argo-workflows.controller.config-map.name" -}}
{{- .Values.controller.configMap.name | default (printf "%s-%s" (include "argo-workflows.controller.fullname" .) "configmap") | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the server service account to use
*/}}

View File

@@ -20,6 +20,12 @@ rules:
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
- workflowtasksets
- workflowtasksets/finalizers
- workflowtaskresults
- workflowtaskresults/finalizers
- workflowartifactgctasks
- workflowartifactgctasks/finalizers
verbs:
- get
- list
@@ -46,6 +52,12 @@ rules:
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
- workflowtasksets
- workflowtasksets/finalizers
- workflowtaskresults
- workflowtaskresults/finalizers
- workflowartifactgctasks
- workflowartifactgctasks/finalizers
verbs:
- create
- delete
@@ -71,14 +83,18 @@ rules:
- workflows/finalizers
- workfloweventbindings
- workfloweventbindings/finalizers
- workflowtasksets
- workflowtasksets/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
- workflowtasksets
- workflowtasksets/finalizers
- workflowtaskresults
- workflowtaskresults/finalizers
- workflowartifactgctasks
- workflowartifactgctasks/finalizers
verbs:
- create
- delete

View File

@@ -1,7 +1,8 @@
{{- if .Values.controller.configMap.create }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "argo-workflows.controller.fullname" . }}-configmap
name: {{ template "argo-workflows.controller.config-map.name" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "argo-workflows.labels" (dict "context" . "component" .Values.controller.name "name" "cm") | nindent 4 }}
@@ -144,7 +145,7 @@ data:
rbac:
enabled: {{ .Values.server.sso.rbac.enabled }}
{{- with .Values.server.sso.scopes }}
scopes: {{ toYaml . | nindent 8 }}
scopes: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.server.sso.issuerAlias }}
issuerAlias: {{ toYaml . }}
@@ -161,6 +162,9 @@ data:
{{- with .Values.server.sso.insecureSkipVerify }}
insecureSkipVerify: {{ toYaml . }}
{{- end }}
{{- with .Values.server.sso.filterGroupsRegex }}
filterGroupsRegex: {{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.controller.workflowRestrictions }}
workflowRestrictions: {{- toYaml . | nindent 6 }}
@@ -191,3 +195,4 @@ data:
{{- with .Values.controller.podGCDeleteDelayDuration }}
podGCDeleteDelayDuration: {{ . }}
{{- end }}
{{- end }}

View File

@@ -12,6 +12,7 @@ metadata:
{{- end }}
spec:
replicas: {{ .Values.controller.replicas }}
revisionHistoryLimit: {{ .Values.controller.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "argo-workflows.selectorLabels" (dict "context" . "name" .Values.controller.name) | nindent 6 }}
@@ -44,7 +45,7 @@ spec:
command: [ "workflow-controller" ]
args:
- "--configmap"
- "{{ template "argo-workflows.controller.fullname" . }}-configmap"
- "{{ template "argo-workflows.controller.config-map.name" . }}"
- "--executor-image"
- "{{- include "argo-workflows.image" (dict "context" . "image" .Values.executor.image) }}:{{ default (include "argo-workflows.defaultTag" .) .Values.executor.image.tag }}"
- "--loglevel"
@@ -60,6 +61,18 @@ spec:
- "--workflow-workers"
- {{ . | quote }}
{{- end }}
{{- with .Values.controller.workflowTTLWorkers }}
- "--workflow-ttl-workers"
- {{ . | quote }}
{{- end }}
{{- with .Values.controller.podCleanupWorkers }}
- "--pod-cleanup-workers"
- {{ . | quote }}
{{- end }}
{{- with .Values.controller.cronWorkflowWorkers }}
- "--cron-workflow-workers"
- {{ . | quote }}
{{- end }}
{{- with .Values.controller.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}

View File

@@ -6,7 +6,7 @@ metadata:
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "argo-workflows.labels" (dict "context" . "component" .Values.controller.name "name" .Values.controller.name) | nindent 4 }}
app.kubernetes.io/version: {{ default (include "argo-workflows.defaultTag" .) .Values.controller.image.tag | trunc 63 | quote }}
app.kubernetes.io/version: {{ include "argo-workflows.controller_chart_version_label" . }}
{{- with .Values.controller.serviceLabels }}
{{ toYaml . | nindent 4 }}
{{- end }}

View File

@@ -38,6 +38,7 @@ rules:
- workflowtaskresults
verbs:
- create
- patch
- apiGroups:
- argoproj.io
resources:

View File

@@ -441,6 +441,8 @@ spec:
type: object
securityToken:
type: string
useSDKCreds:
type: boolean
required:
- key
type: object
@@ -470,6 +472,17 @@ spec:
type: object
bucket:
type: string
caSecret:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
createBucketIfNotPresent:
properties:
objectLocking:

View File

@@ -1,6 +1,6 @@
{{ range .Values.extraObjects }}
---
{{- if typeIs "string" . }}
{{ if typeIs "string" . }}
{{- tpl . $ }}
{{- else }}
{{- tpl (toYaml .) $ }}

View File

@@ -15,6 +15,7 @@ spec:
{{- if not .Values.server.autoscaling.enabled }}
replicas: {{ .Values.server.replicas }}
{{- end }}
revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "argo-workflows.selectorLabels" (dict "context" . "name" .Values.server.name) | nindent 6 }}
@@ -36,6 +37,10 @@ spec:
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.server.hostAliases }}
hostAliases:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.server.extraInitContainers }}
initContainers:
{{- tpl (toYaml .) $ | nindent 8 }}
@@ -48,10 +53,16 @@ spec:
{{- toYaml .Values.server.securityContext | nindent 12 }}
args:
- server
- --configmap={{ template "argo-workflows.controller.fullname" . }}-configmap
- --configmap={{ template "argo-workflows.controller.config-map.name" . }}
{{- with .Values.server.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- if .Values.server.authMode }}
- "--auth-mode={{ .Values.server.authMode }}"
{{- end }}
{{- range .Values.server.authModes }}
- "--auth-mode={{ . }}"
{{- end }}
- "--secure={{ .Values.server.secure }}"
{{- if .Values.singleNamespace }}
- "--namespaced"
@@ -106,7 +117,7 @@ spec:
{{- end }}
volumes:
- name: tmp
emptyDir: {}
{{- toYaml .Values.server.tmpVolume | nindent 8 }}
{{- with .Values.server.volumes }}
{{- toYaml . | nindent 6}}
{{- end }}

View File

@@ -26,6 +26,9 @@ nameOverride:
# -- String to fully override "argo-workflows.fullname" template
fullnameOverride:
# -- Labels to set on all resources
commonLabels: {}
# -- Override the Kubernetes version, which is used to evaluate certain manifests
kubeVersionOverride: ""
@@ -88,6 +91,12 @@ controller:
# -- Allows controller to create and update ConfigMaps. Enables memoization feature
writeConfigMaps: false
configMap:
# -- Create a ConfigMap for the controller
create: true
# -- ConfigMap name
name: ""
# -- Limits the maximum number of incomplete workflows in a namespace
namespaceParallelism:
# -- Resolves ongoing, uncommon AWS EKS bug: https://github.com/argoproj/argo-workflows/pull/4224
@@ -121,7 +130,7 @@ controller:
# -- Service metrics port name
servicePortName: metrics
# -- ServiceMonitor relabel configs to apply to samples before scraping
## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
relabelings: []
# -- ServiceMonitor metric relabel configs to apply to samples before ingestion
## Ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
@@ -158,6 +167,10 @@ controller:
# passwordSecret:
# name: argo-postgres-config
# key: password
# ssl: true
# # sslMode must be one of: disable, require, verify-ca, verify-full
# # you can find more information about those ssl options here: https://godoc.org/github.com/lib/pq
# sslMode: require
# -- Default values that will apply to all Workflows from this controller, unless overridden on the Workflow-level.
# Only valid for 2.7+
@@ -173,6 +186,13 @@ controller:
# -- Number of workflow workers
workflowWorkers: # 32
# -- Number of workflow TTL workers
workflowTTLWorkers: # 4
# -- Number of pod cleanup workers
podCleanupWorkers: # 4
# -- Number of cron workflow workers
# Only valid for 3.5+
cronWorkflowWorkers: # 8
# -- Restricts the Workflows that the controller will process.
# Only valid for 2.9+
workflowRestrictions: {}
@@ -281,6 +301,8 @@ controller:
volumes: []
# -- The number of controller pods to run
replicas: 1
# -- The number of revisions to keep.
revisionHistoryLimit: 10
pdb:
# -- Configure [Pod Disruption Budget] for the controller pods
@@ -436,6 +458,12 @@ server:
# -- Service port name
servicePortName: "" # http
# -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files
hostAliases: []
# - ip: 10.20.30.40
# hostnames:
# - git.myhostname
serviceAccount:
# -- Create a service account for the server
create: true
@@ -458,6 +486,8 @@ server:
resources: {}
# -- The number of server pods to run
replicas: 1
# -- The number of revisions to keep.
revisionHistoryLimit: 10
## Argo Server Horizontal Pod Autoscaler
autoscaling:
# -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo Server
@@ -522,14 +552,16 @@ server:
# - name: FOO
# value: "bar"
# -- Extra arguments to provide to the Argo server binary, such as for disabling authentication.
# -- Deprecated; use server.authModes instead.
authMode: ""
# -- A list of supported authentication modes. Available values are `server`, `client`, or `sso`. If you provide sso, please configure `.Values.server.sso` as well.
## Ref: https://argoproj.github.io/argo-workflows/argo-server-auth-mode/
authModes: []
# -- Extra arguments to provide to the Argo server binary.
## Ref: https://argoproj.github.io/argo-workflows/argo-server/#options
extraArgs: []
# If you want to disable authentication for purposes such as:
# - local dev-mode without authentication
# - gateway authentication through some other service such as KeyCloak
# uncomment the lines below and comment out the default empty list `extraArgs: []` above:
# extraArgs:
# - --auth-mode=server
logging:
# -- Set the logging level (one of: `debug`, `info`, `warn`, `error`)
@@ -539,6 +571,9 @@ server:
# -- Set the logging format (one of: `text`, `json`)
format: "text"
# -- Volume to be mounted in Pods for temporary files.
tmpVolume:
emptyDir: {}
# -- Additional volume mounts to the server main container.
volumeMounts: []
# -- Additional volumes to the server pod.
@@ -631,8 +666,7 @@ server:
# SSO configuration when SSO is specified as a server auth mode.
sso:
# -- Create SSO configuration
## SSO is activated by adding --auth-mode=sso to the server command line.
# -- Create SSO configuration. If you set `true` , please also set `.Values.server.authMode` as `sso`.
enabled: false
# -- The root URL of the OIDC identity provider
issuer: https://accounts.google.com
@@ -647,7 +681,7 @@ server:
# -- Key of a secret to retrieve the app OIDC client secret
key: client-secret
# - The OIDC redirect URL. Should be in the form <argo-root-url>/oauth2/callback.
redirectUrl: https://argo/oauth2/callback
redirectUrl: ""
rbac:
# -- Adds ServiceAccount Policy to server (Cluster)Role.
enabled: true
@@ -673,6 +707,11 @@ server:
userInfoPath: ""
# -- Skip TLS verification for the HTTP client
insecureSkipVerify: false
# -- Filter the groups returned by the OIDC provider
## A logical "OR" is used between each regex in the list
filterGroupsRegex: []
# - ".*argo-wf.*"
# - ".*argo-workflow.*"
# -- Extra containers to be added to the server deployment
extraContainers: []