commit 6124fe3112f8677c4da4691dffe4063c5438929c Author: exhsgahm Date: Wed Sep 7 14:33:28 2022 +0900 init
diff --git a/architecture/datasaker-dev-1st.drawio b/architecture/datasaker-dev-1st.drawio new file mode 100644 index 0000000..c8f62b5 --- /dev/null +++ b/architecture/datasaker-dev-1st.drawio @@ -0,0 +1 @@ +[single-line compressed draw.io diagram payload omitted] \ No newline at end of file
diff --git a/architecture/datasaker-prod-1st.drawio b/architecture/datasaker-prod-1st.drawio new file mode 100644 index 0000000..3e99f58 --- /dev/null +++ b/architecture/datasaker-prod-1st.drawio @@ -0,0 +1 @@ +[single-line compressed draw.io diagram payload omitted] \ No newline at end of file
diff --git a/doc/1_how_to_install_vpc.txt b/doc/1_how_to_install_vpc.txt new file mode 100644 index 0000000..e69de29
diff --git a/doc/2_how_to_install_dev_cluster.txt b/doc/2_how_to_install_dev_cluster.txt new file mode 100644 index 0000000..e69de29
diff --git a/doc/3_how_to_install_iac_cluster.txt b/doc/3_how_to_install_iac_cluster.txt new file mode 100644 index 0000000..e69de29
diff --git a/doc/4_how_to_install_prod_cluster.txt b/doc/4_how_to_install_prod_cluster.txt new file mode 100644 index 0000000..e69de29
diff --git a/kops/dev.datasaker.io.yaml b/kops/dev.datasaker.io.yaml new file mode 100644 index 0000000..34450f9 --- /dev/null +++ b/kops/dev.datasaker.io.yaml @@ -0,0 +1,365 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2022-09-06T05:44:08Z" + name: dev.datasaker.io +spec: + api: + loadBalancer: + class: Classic + type: Public + authorization: + rbac: {} + channel: stable + cloudProvider: aws + configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io + containerRuntime: containerd + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume:
true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: events + iam: + allowContainerRegistry: true + legacy: false + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.23.10 + masterPublicName: api.dev.datasaker.io + networkCIDR: 172.21.0.0/16 + networkID: vpc-03cbb88e181ccb46e + networking: + calico: {} + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 172.21.1.0/24 + id: subnet-021536c4f12971c74 + name: ap-northeast-2a + type: Private + zone: ap-northeast-2a + - cidr: 172.21.2.0/24 + id: subnet-0c90842daa15aa7c7 + name: ap-northeast-2b + type: Private + zone: ap-northeast-2b + - cidr: 172.21.3.0/24 + id: subnet-0ae3ab7ae241fe761 + name: ap-northeast-2c + type: Private + zone: ap-northeast-2c + - cidr: 172.21.0.0/28 + id: subnet-0d762a41fb41d63e5 + name: utility-ap-northeast-2a + type: Utility + zone: ap-northeast-2a + - cidr: 172.21.0.16/28 + id: subnet-0b4f418020349fb84 + name: utility-ap-northeast-2b + type: Utility + zone: ap-northeast-2b + - cidr: 172.21.0.32/28 + id: subnet-05b9f4f02955c3307 + name: utility-ap-northeast-2c + type: Utility + zone: ap-northeast-2c + topology: + dns: + type: Public + masters: private + nodes: private + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T05:52:24Z" + generation: 2 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-a +spec: + image: ami-0ea5eb4b05645aa8a + machineType: m5.4xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-a + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T05:55:36Z" + generation: 1 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-b +spec: + image: ami-0ea5eb4b05645aa8a + machineType: m5.4xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-b + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T05:58:51Z" + generation: 2 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-c +spec: + image: ami-0ea5eb4b05645aa8a + machineType: m5.4xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-c + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2c + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T06:07:45Z" + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-mgmt-a +spec: + image: ami-0ea5eb4b05645aa8a + machineType: c5.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-a + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: 
+ creationTimestamp: "2022-09-06T06:09:31Z" + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-mgmt-b +spec: + image: ami-0ea5eb4b05645aa8a + machineType: c5.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-b + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T06:01:06Z" + generation: 1 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-process-a +spec: + image: ami-0ea5eb4b05645aa8a + machineType: c5.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-a + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T06:02:44Z" + generation: 1 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-process-b +spec: + image: ami-0ea5eb4b05645aa8a + machineType: c5.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-b + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T06:04:52Z" + generation: 1 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-process-c +spec: + image: ami-0ea5eb4b05645aa8a + machineType: c5.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-c + role: Node + rootVolumeSize: 100 + subnets: + - ap-northeast-2c + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T05:44:09Z" + generation: 2 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: master-ap-northeast-2a +spec: + image: ami-0ea5eb4b05645aa8a + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: t3.small + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2a + role: Master + rootVolumeSize: 50 + subnets: + - ap-northeast-2a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T05:44:09Z" + generation: 2 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: master-ap-northeast-2b +spec: + image: ami-0ea5eb4b05645aa8a + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: t3.small + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2b + role: Master + rootVolumeSize: 50 + subnets: + - ap-northeast-2b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-06T05:44:09Z" + generation: 2 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: master-ap-northeast-2c +spec: + image: ami-0ea5eb4b05645aa8a + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: t3.small + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2c + role: Master + rootVolumeSize: 50 + subnets: + - ap-northeast-2c diff --git a/kops/iac.datasaker.io.yaml b/kops/iac.datasaker.io.yaml new file mode 100644 index 0000000..c3b5aa2 --- /dev/null +++ b/kops/iac.datasaker.io.yaml @@ -0,0 +1,113 @@ 
+apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2022-09-02T12:52:55Z" + name: iac.datasaker.io +spec: + api: + loadBalancer: + class: Classic + type: Public + authorization: + rbac: {} + channel: stable + cloudProvider: aws + configBase: s3://clusters.iac.datasaker.io/iac.datasaker.io + containerRuntime: containerd + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + memoryRequest: 100Mi + name: events + iam: + allowContainerRegistry: true + legacy: false + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.23.10 + masterPublicName: api.iac.datasaker.io + networkCIDR: 172.21.0.0/16 + networkID: vpc-06735fc8479f37b4b + networking: + calico: {} + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 172.21.11.0/24 + name: iac-ap-northeast-2a + type: Private + zone: ap-northeast-2a + - cidr: 172.21.10.0/24 + name: iac-utility-ap-northeast-2a + type: Utility + zone: ap-northeast-2a + topology: + dns: + type: Public + masters: private + nodes: private + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-02T12:52:55Z" + labels: + kops.k8s.io/cluster: iac.datasaker.io + name: master-ap-northeast-2a +spec: + image: ami-054a058b04f721571 + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: t3.small + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2a + role: Master + rootVolumeSize: 40 + subnets: + - iac-ap-northeast-2a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-02T12:52:55Z" + labels: + kops.k8s.io/cluster: iac.datasaker.io + name: nodes-ap-northeast-2a +spec: + image: ami-054a058b04f721571 + instanceMetadata: + httpPutResponseHopLimit: 1 + httpTokens: required + machineType: t3.small + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: nodes-ap-northeast-2a + role: Node + rootVolumeSize: 100 + subnets: + - iac-ap-northeast-2a diff --git a/scripts/temp.sh b/scripts/temp.sh new file mode 100644 index 0000000..e69de29 diff --git a/terraform/tf-datasaker/dev.tf b/terraform/tf-datasaker/dev.tf new file mode 100644 index 0000000..959adde --- /dev/null +++ b/terraform/tf-datasaker/dev.tf @@ -0,0 +1,130 @@ +resource "aws_route_table" "rt-datasaker-dev" { + tags = { + "Name" = "rt-datasaker-dev" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + +resource "aws_route" "route-private-rt-datasaker-dev-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.natgw-datasaker.id + route_table_id = aws_route_table.rt-datasaker-dev.id +} + + + +resource "aws_subnet" "sbn-dev-a" { + availability_zone = "ap-northeast-2a" + cidr_block = "172.21.1.0/24" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-dev-a.datasaker" + "SubnetType" = "Private" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_subnet" "sbn-dev-b" { + availability_zone = "ap-northeast-2b" + cidr_block = "172.21.2.0/24" + 
enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-dev-b.datasaker" + "SubnetType" = "Private" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_subnet" "sbn-dev-c" { + availability_zone = "ap-northeast-2c" + cidr_block = "172.21.3.0/24" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-dev-c.datasaker" + "SubnetType" = "Private" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + +resource "aws_route_table_association" "rta-dev-a" { + route_table_id = aws_route_table.rt-datasaker-dev.id + subnet_id = aws_subnet.sbn-dev-a.id +} + +resource "aws_route_table_association" "rta-dev-b" { + route_table_id = aws_route_table.rt-datasaker-dev.id + subnet_id = aws_subnet.sbn-dev-b.id +} + +resource "aws_route_table_association" "rta-dev-c" { + route_table_id = aws_route_table.rt-datasaker-dev.id + subnet_id = aws_subnet.sbn-dev-c.id +} + +resource "aws_security_group" "sg-dev-datasaker" { + description = "Security group dev-datasaker" + name = "secg-dev-datasaker" + tags = { + "Name" = "sg-dev-datasaker" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + +resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.sg-dev-datasaker.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 8 + protocol = "icmp" + security_group_id = aws_security_group.sg-dev-datasaker.id + to_port = 8 + type = "ingress" +} + +resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 8 + protocol = "icmp" + security_group_id = aws_security_group.sg-dev-datasaker.id + to_port = 8 + type = "egress" +} + +resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "tcp" + security_group_id = aws_security_group.sg-dev-datasaker.id + to_port = 65535 + type = "egress" +} + + + + + + + + diff --git a/terraform/tf-datasaker/dmz.tf b/terraform/tf-datasaker/dmz.tf new file mode 100644 index 0000000..20a02f2 --- /dev/null +++ b/terraform/tf-datasaker/dmz.tf @@ -0,0 +1,249 @@ + +output "sbn_dmz_a_id" { + value = aws_subnet.sbn-dmz-a.id +} + +output "sbn_dmz_b_id" { + value = aws_subnet.sbn-dmz-b.id +} + +output "sbn_dmz_c_id" { + value = aws_subnet.sbn-dmz-c.id +} + + + +resource "aws_subnet" "sbn-dmz-a" { + availability_zone = "ap-northeast-2a" + cidr_block = "172.21.0.0/28" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-dmz-a.datasaker" + "SubnetType" = "Public" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_subnet" "sbn-dmz-b" { + availability_zone = "ap-northeast-2b" + cidr_block = "172.21.0.16/28" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = 
"resource-name" + tags = { + "Name" = "sbn-dmz-b.datasaker" + "SubnetType" = "Public" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_subnet" "sbn-dmz-c" { + availability_zone = "ap-northeast-2c" + cidr_block = "172.21.0.32/28" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-dmz-c.datasaker" + "SubnetType" = "Public" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + + +resource "aws_route_table_association" "rta-dmz-a" { + route_table_id = aws_route_table.rt-datasaker-pub.id + subnet_id = aws_subnet.sbn-dmz-a.id +} + +resource "aws_route_table_association" "rta-dmz-b" { + route_table_id = aws_route_table.rt-datasaker-pub.id + subnet_id = aws_subnet.sbn-dmz-b.id +} + +resource "aws_route_table_association" "rta-dmz-c" { + route_table_id = aws_route_table.rt-datasaker-pub.id + subnet_id = aws_subnet.sbn-dmz-c.id +} + + + +resource "aws_security_group" "sg-dmz-datasaker" { + description = "Security group dmz-datasaker" + name = "secg-dmz-datasaker" + tags = { + "Name" = "sg-dmz-datasaker" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +# resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dmz-datasaker-io" { +# cidr_blocks = ["0.0.0.0/0"] +# from_port = 22 +# protocol = "tcp" +# security_group_id = aws_security_group.sg-dmz-datasaker.id +# to_port = 22 +# type = "ingress" +# } + +resource "aws_security_group_rule" "sgr-from-115-178-73-2--32-ingress-tcp-22to22-dmz-datasaker-io" { + cidr_blocks = ["115.178.73.2/32"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.sg-dmz-datasaker.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "sgr-from-115-178-73-91--32-ingress-tcp-22to22-dmz-datasaker-io" { + cidr_blocks = ["115.178.73.91/32"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.sg-dmz-datasaker.id + to_port = 22 + type = "ingress" +} + + + + +# resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-dmz-datasaker-io" { +# cidr_blocks = ["0.0.0.0/0"] +# from_port = 8 +# protocol = "icmp" +# security_group_id = aws_security_group.sg-dmz-datasaker.id +# to_port = 8 +# type = "ingress" +# } + +# resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-dmz-datasaker-io" { +# cidr_blocks = ["0.0.0.0/0"] +# from_port = 8 +# protocol = "icmp" +# security_group_id = aws_security_group.sg-dmz-datasaker.id +# to_port = 8 +# type = "egress" +# } + +resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dmz-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "tcp" + security_group_id = aws_security_group.sg-dmz-datasaker.id + to_port = 65535 + type = "egress" +} + + + + +resource "aws_launch_template" "lt-dmz-bastion-datasaker" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 20 + volume_type = "gp3" + } + } + + + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "t3.small" + key_name = aws_key_pair.kp-bastion-datasaker.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + 
http_tokens = "required" + } + monitoring { + enabled = false + } + name = "lt-dmz-bastion-datasaker" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.sg-dmz-datasaker.id] + } + # tag_specifications { + # resource_type = "instance" + # tags = { + # "Name" = "lt-dmz-bastion-datasaker" + # } + # } + # tag_specifications { + # resource_type = "volume" + # tags = { + # "Name" = "master-ap-northeast-2b.masters.ap-northeast-2.dev.datasaker.io" + # } + # } + tags = { + "Name" = "lt-dmz-bastion-datasaker" + } + # user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2b.masters.ap-northeast-2.dev.datasaker.io_user_data") +} + +resource "aws_autoscaling_group" "ag-dmz-bastion-datasaker" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.lt-dmz-bastion-datasaker.id + version = aws_launch_template.lt-dmz-bastion-datasaker.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "ag-dmz-bastion-datasaker" + protect_from_scale_in = false + + tag { + key = "Name" + propagate_at_launch = true + value = "ag-dmz-bastion-datasaker" + } + vpc_zone_identifier = [aws_subnet.sbn-dmz-a.id,aws_subnet.sbn-dmz-b.id] +} + + + +resource "aws_eip" "eip-natgw-datasaker" { + # instance = aws_instance.web1-ec2.id + vpc = true + + tags = { + Name = "eip-natgw-datasaker" + } +} + +resource "aws_nat_gateway" "natgw-datasaker" { + allocation_id = aws_eip.eip-natgw-datasaker.id + subnet_id = aws_subnet.sbn-dmz-a.id + + tags = { + Name = "natgw-datasaker" + } + + # To ensure proper ordering, it is recommended to add an explicit dependency + # on the Internet Gateway for the VPC. 
+ depends_on = [aws_internet_gateway.igw-datasaker] +} + + + diff --git a/terraform/tf-datasaker/iac.tf b/terraform/tf-datasaker/iac.tf new file mode 100644 index 0000000..a9148f3 --- /dev/null +++ b/terraform/tf-datasaker/iac.tf @@ -0,0 +1,126 @@ +resource "aws_route_table" "rt-datasaker-iac" { + tags = { + "Name" = "rt-datasaker-iac" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + +resource "aws_route" "route-private-rt-datasaker-iac-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.natgw-datasaker.id + route_table_id = aws_route_table.rt-datasaker-iac.id +} + + +resource "aws_subnet" "sbn-iac-a" { + availability_zone = "ap-northeast-2a" + cidr_block = "172.21.4.0/24" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-iac-a.datasaker" + "SubnetType" = "Private" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_subnet" "sbn-iac-b" { + availability_zone = "ap-northeast-2b" + cidr_block = "172.21.5.0/24" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-iac-b.datasaker" + "SubnetType" = "Private" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_subnet" "sbn-iac-c" { + availability_zone = "ap-northeast-2c" + cidr_block = "172.21.6.0/24" + enable_resource_name_dns_a_record_on_launch = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "Name" = "sbn-iac-c.datasaker" + "SubnetType" = "Private" + "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_route_table_association" "rta-iac-a" { + route_table_id = aws_route_table.rt-datasaker-iac.id + subnet_id = aws_subnet.sbn-iac-a.id +} + +resource "aws_route_table_association" "rta-iac-b" { + route_table_id = aws_route_table.rt-datasaker-iac.id + subnet_id = aws_subnet.sbn-iac-b.id +} + +resource "aws_route_table_association" "rta-iac-c" { + route_table_id = aws_route_table.rt-datasaker-iac.id + subnet_id = aws_subnet.sbn-iac-c.id +} + +resource "aws_security_group" "sg-iac-datasaker" { + description = "Security group iac-datasaker" + name = "secg-iac-datasaker" + tags = { + "Name" = "sg-iac-datasaker" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + + +resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-iac-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.sg-iac-datasaker.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-iac-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 8 + protocol = "icmp" + security_group_id = aws_security_group.sg-iac-datasaker.id + to_port = 0 + type = "ingress" +} + + +resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-iac-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 8 + protocol = "icmp" + security_group_id = aws_security_group.sg-iac-datasaker.id + to_port = 8 + type = "egress" +} + +resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-iac-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 
+ protocol = "tcp" + security_group_id = aws_security_group.sg-iac-datasaker.id + to_port = 65535 + type = "egress" +} + + + + diff --git a/terraform/tf-datasaker/vpc.tf b/terraform/tf-datasaker/vpc.tf new file mode 100644 index 0000000..707cb1f --- /dev/null +++ b/terraform/tf-datasaker/vpc.tf @@ -0,0 +1,93 @@ +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +} + +provider "aws" { + region = "ap-northeast-2" +} + +provider "aws" { + alias = "files" + region = "ap-northeast-2" +} + +output "vpc_datasaker_id" { + value = aws_vpc.vpc-datasaker.id +} + +output "vpc_datasaker_cidr_block" { + value = aws_vpc.vpc-datasaker.cidr_block +} + + + +resource "aws_vpc" "vpc-datasaker" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.21.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "Name" = "vpc-datasaker" + } +} + +resource "aws_vpc_dhcp_options" "vpc-dhcp-datasaker" { + domain_name = "ap-northeast-2.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "Name" = "vpc-dhcp-datasaker" + } +} + +resource "aws_vpc_dhcp_options_association" "vpc-dhcp-asso-datasaker" { + dhcp_options_id = aws_vpc_dhcp_options.vpc-dhcp-datasaker.id + vpc_id = aws_vpc.vpc-datasaker.id +} + +resource "aws_internet_gateway" "igw-datasaker" { + tags = { + "Name" = "igw-datasaker" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + +resource "aws_key_pair" "kp-bastion-datasaker" { + key_name = "kp-bastion-datasaker" + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDv9Bk/20f0xHLQN1Mnub0VwsbRw7ggubeUZ+pUVaX9BD7uUud/ITktmTArbabLJLGgWx64la6+6VuQHauzX/cpMp4dVxoaySQDGPsB+V0WnXaq0pWop5BoJaPO75lpk/Kp7NFtn9x3315Rqmis1Df1UrQehMkqunnr2jWkil6iueAckztpsnqxlb8S+uVYiM7C4HsVx8XdOT3WtfUv+hzDlejy11nzi5T4HMT70O107N4g5CrEapluc7M3NfxCFhz5Gxu8P0dfJKLs9fFT4E8DRfGly5/cDcKbiJHSAZYRN6UwKr3z7LAw8aIW8JWflXn1fMZ92qdiT04kN8ZdVzyMpUiWMXJQPrfI2EHT/OHAympzKrXnT98oIqJANE4Eq72OG9Hrb6Tauk8Bde5/v3P9d7m5Zi9tx+01PZ1JQR+1dkJeV3Am6mjKWrxIowKPol2chnARoU7y1rEZGGi+09bD5hUq7KW6z61DUIlCMYF0Oq0IMs/voQP8zqpDmvSPNJc= hsgahm@ws-ubuntu" + tags = { + "Name" = "kp-bastion-datasaker" + } +} + + +resource "aws_route_table" "rt-datasaker-pub" { + tags = { + "Name" = "rt-datasaker-pub" + } + vpc_id = aws_vpc.vpc-datasaker.id +} + + +resource "aws_route" "r-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.igw-datasaker.id + route_table_id = aws_route_table.rt-datasaker-pub.id +} + +resource "aws_route" "r-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.igw-datasaker.id + route_table_id = aws_route_table.rt-datasaker-pub.id +} + + + + diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_masters.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_masters.dev.datasaker.io_policy new file mode 100644 index 0000000..9f31f33 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_masters.dev.datasaker.io_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_nodes.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_nodes.dev.datasaker.io_policy new file mode 100644 index 0000000..9f31f33 --- /dev/null +++ 
b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_nodes.dev.datasaker.io_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_policy_masters.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_policy_masters.dev.datasaker.io_policy new file mode 100644 index 0000000..7a23370 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_policy_masters.dev.datasaker.io_policy @@ -0,0 +1,273 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::hostedzone/Z072735718G25WNVKU834" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + 
"autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifyVolume", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:DescribeRepositories", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:ListImages", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} \ 
No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy new file mode 100644 index 0000000..aa71a3a --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy @@ -0,0 +1,50 @@ +{ + "Statement": [ + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/addons/*", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/cluster-completed.spec", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/igconfig/node/*", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/secrets/dockerconfig" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:ModifyNetworkInterfaceAttribute", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:DescribeRepositories", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:ListImages", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key b/terraform/tf-kops-dev-20200907-ip/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key new file mode 100644 index 0000000..b10a93b --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCyfTPnCyr0Typ7yGTcy0LEGa8IH8yESEXa4Qyr85dWrxazTnWO7iYS0Ze6L0GMMO5qZXg/ntJGhI4PYF/WbCZ5KZMRXePyQIVs5pKMvSX4yH2gPIET5c6yTg4ZSIqrZDLBXGEZxMVp/SnNx1tRzxi0plBDtguSy6LZD0C1ue+VeT4oO98EB2T01GOeQp+RlF/theZuEWSWOVfFD0qVdsHIwVlYYlEZR11IrTamabMOVzyw+/8cokA4hgsrrkSrpKQ2YW0evHK1pxZrw+i3YJuHh3hJ0h98Ymw3rpHGec59gXaYT0PQEQvZs9RCrYw8NpCTQrImXR1UVjeeY3KGgpYQXna+WAmkjA+K/JvLmHGeombVJyd3v8330FX+Ob9klgqTWFvwb8Ew4QCcfl5hDAWxvzoJKAoG/TAZd13aNYaZAVkeWB7vPFWZ0brea6sqUJzXqzPwUXa0OirnqEfxMLZoo4tFyfxuVYVK+ScxayBPYJQkhwmTAZ4bj0OfQEw/jJM= hsgahm@ws-ubuntu diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..7d229ec --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 
+NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + 
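  # cgroupDriver above has to agree with the cgroup driver used by the container runtime
  # (containerd, configured earlier in this spec). clusterDNS below (100.64.0.10) is the
  # in-cluster DNS address and sits inside the service CIDR (100.64.0.0/13 in the
  # kube-apiserver spec later in this commit).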
cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-a +InstanceGroupRole: Node +NodeupConfigHash: Q85TE/V9HxgnA5xKGRYrJfgXmGE5+rCZ1GJASWQ/GPE= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..ae52b6c --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-b +InstanceGroupRole: Node +NodeupConfigHash: kf3dJ1SjdlO0c/UC6L3UzWB73HR/Az7gIc1qy8Koisg= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..9b0a2f9 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-c +InstanceGroupRole: Node +NodeupConfigHash: bUQw6p3VmVBXzspF9eyfeIhthTy8JshdVjdM4O3TfGo= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..d391c83 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-mgmt-a +InstanceGroupRole: Node +NodeupConfigHash: nnxeoTtGPiOtgDvp7cOTcBrm40EIMijn9OZMlwmlQ6I= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..b1a998f --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-mgmt-b +InstanceGroupRole: Node +NodeupConfigHash: mN0L7VdMkoLAhv46mATyltMs5Kr9sI4BSgkg8PG+IJc= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..b9269bb --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-a +InstanceGroupRole: Node +NodeupConfigHash: Iq9X//Sll3FjhJvy7RIuzBvhmFs+AjtCzz8V97KAYWM= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..73b2b3d --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-b +InstanceGroupRole: Node +NodeupConfigHash: t2aCaXecWpS9pwLOKIb8kPih6JP5vPDaz62JVPOnJG8= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..bc90daa --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-c +InstanceGroupRole: Node +NodeupConfigHash: 9+uvjmv1ysTusIQEDm+zfrqfOfsZs+Sn6hUXJ5jl5xY= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..81f3cef --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data @@ -0,0 +1,275 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: 
registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2a +InstanceGroupRole: Master +NodeupConfigHash: ymgfHaOypQ5PSYZMRskqJMEwwq0wytxVeCScbrEYXqQ= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..127adc3 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data @@ -0,0 +1,275 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export 
AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + 
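  # apiServerCount: 3 above matches the three master instance groups
  # (ap-northeast-2a/2b/2c) defined in this commit.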
cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 
10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2b +InstanceGroupRole: Master +NodeupConfigHash: LossyPq4Na2LUuvlTDeLrVynoNeEIXBiZozuBvYcGJA= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..dd56fc9 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data @@ -0,0 +1,275 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: 
registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2c +InstanceGroupRole: Master +NodeupConfigHash: 1bFoJNbAl2IeiaRgh7X4jwmCV0ZYcDCAD5B+ZuU7oig= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_cluster-completed.spec_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000..446c1b6 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,252 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2022-09-06T05:44:08Z" + generation: 1 + name: dev.datasaker.io +spec: + api: + loadBalancer: + class: Classic + type: Public + authorization: + rbac: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io + configStore: s3://clusters.dev.datasaker.io/dev.datasaker.io + containerRuntime: containerd + containerd: + logLevel: info + version: 1.6.6 + dnsZone: Z072735718G25WNVKU834 + docker: + skipInstall: 
true + etcdClusters: + - backups: + backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: main + version: 3.5.4 + - backups: + backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: events + version: 3.5.4 + externalDns: + provider: dns-controller + iam: + allowContainerRegistry: true + legacy: false + keyStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.21.3 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 + kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 + 
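  # NOTE (annotation, not part of the kops-generated spec): the kubelet block below is
  # the per-node kubelet configuration, and the masterKubelet block further down repeats
  # it with registerSchedulable: false so control-plane nodes do not take ordinary
  # workloads. As a hedged example, assuming kops 1.24 and the state store named in
  # configBase above, the user-editable spec can be viewed with:
  #   kops get cluster dev.datasaker.io --state s3://clusters.dev.datasaker.io -o yaml
  # while the fully populated version is what this cluster-completed.spec object stores.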
kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 115.178.73.2/32 + - 115.178.73.91/32 + kubernetesVersion: 1.23.10 + masterInternalName: api.internal.dev.datasaker.io + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.dev.datasaker.io + networkCIDR: 172.21.0.0/16 + networkID: vpc-03cbb88e181ccb46e + networking: + calico: + encapsulationMode: ipip + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 115.178.73.2/32 + - 115.178.73.91/32 + subnets: + - cidr: 172.21.1.0/24 + id: subnet-021536c4f12971c74 + name: ap-northeast-2a + type: Private + zone: ap-northeast-2a + - cidr: 172.21.2.0/24 + id: subnet-0c90842daa15aa7c7 + name: ap-northeast-2b + type: Private + zone: ap-northeast-2b + - cidr: 172.21.3.0/24 + id: subnet-0ae3ab7ae241fe761 + name: ap-northeast-2c + type: Private + zone: ap-northeast-2c + - cidr: 172.21.0.0/28 + id: subnet-0d762a41fb41d63e5 + name: utility-ap-northeast-2a + type: Utility + zone: ap-northeast-2a + - cidr: 172.21.0.16/28 + id: subnet-0b4f418020349fb84 + name: utility-ap-northeast-2b + type: Utility + zone: ap-northeast-2b + - cidr: 172.21.0.32/28 + id: subnet-05b9f4f02955c3307 + name: utility-ap-northeast-2c + type: Utility + zone: ap-northeast-2c + topology: + dns: + type: Public + masters: private + nodes: private diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content new file mode 100644 index 0000000..3a2e037 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content @@ -0,0 +1,792 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: 
kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-attacher-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - csi.storage.k8s.io + resources: + - csinodeinfos + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-provisioner-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - delete +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-resizer-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - 
patch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-snapshotter-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-attacher-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-attacher-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-provisioner-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-provisioner-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-resizer-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-resizer-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-snapshotter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-snapshotter-role +subjects: +- kind: ServiceAccount + name: 
ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-getter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-csi-node-role +subjects: +- kind: ServiceAccount + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-role +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-node + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - node + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=2 + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: kubelet-dir + - mountPath: /csi + name: plugin-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf 
/registration/ebs.csi.aws.com-reg.sock /csi/csi.sock + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02 + imagePullPolicy: IfNotPresent + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: ebs-csi-node-sa + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet + type: Directory + name: kubelet-dir + - hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /dev + type: Directory + name: device-dir + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + kops.k8s.io/managed-by: kops + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - controller + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --k8s-tag-cluster-id=dev.datasaker.io + - --extra-tags=KubernetesCluster=dev.datasaker.io + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: key_id + name: aws-secret + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: access_key + name: aws-secret + optional: true + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + - --feature-gates=Topology=true + - 
--extra-create-metadata + - --leader-election=true + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 + imagePullPolicy: IfNotPresent + name: csi-provisioner + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b + imagePullPolicy: IfNotPresent + name: csi-attacher + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 + imagePullPolicy: IfNotPresent + name: csi-resizer + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02 + imagePullPolicy: IfNotPresent + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccountName: ebs-csi-controller-sa + tolerations: + - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + volumes: + - emptyDir: {} + name: socket-dir + +--- + +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs.csi.aws.com +spec: + attachRequired: true + podInfoOnMount: false + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content new file mode 100644 index 0000000..ba22b24 --- /dev/null +++ 
b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content @@ -0,0 +1,69 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 530752f323a7573cedaa993ac169181c2d36d70e1cb4950d3c1a3347ac586826 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: 1060dbbcbf4f9768081b838e619da1fc3970ef2b86886f8e5c6ff3e2842c2aa3 + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - id: k8s-1.23 + manifest: leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml + manifestHash: b9c91e09c0f28c9b74ff140b8395d611834c627d698846d625c10975a74a48c4 + name: leader-migration.rbac.addons.k8s.io + selector: + k8s-addon: leader-migration.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 3e67c5934d55a5f5ebbd8a97e428aa6d9749812ba209a3dc1f1cb9449ee75c26 + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 + - id: k8s-1.22 + manifest: networking.projectcalico.org/k8s-1.22.yaml + manifestHash: 35704fe8643eb1cf13079a6580590cb32c2b69daf2047787863308fc4c90e88f + name: networking.projectcalico.org + selector: + role.kubernetes.io/networking: "1" + version: 9.99.0 + - id: k8s-1.17 + manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml + manifestHash: 80c38e6bb751e5c9e58a013b9c09b70d0ca34383d15889e09df214090c52713c + name: aws-ebs-csi-driver.addons.k8s.io + selector: + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + version: 9.99.0 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000..8e33a3a --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,385 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + 
kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.8.6@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: 
config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: 
system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000..d8dd7bc --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,140 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.24.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.24.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z072735718G25WNVKU834 + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.24.1@sha256:d0bff3dff30ec695702eb954b7568e3b5aa164f458a70be1d3f5194423ef90a6 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000..7f1e62c --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"s3://clusters.dev.datasaker.io/dev.datasaker.io","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.dev.datasaker.io"],"Region":"ap-northeast-2"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.24.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.dev.datasaker.io + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.24.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.24.1@sha256:dec29a983e633e2d3321fef86e6fea211784b2dc9b62ce735d708e781ef4919c + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + 
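      # NOTE (annotation, not part of the kops-generated manifest): the hostPath volume
      # below mounts /etc/kubernetes/kops-controller/, the PKI directory referenced by
      # serverKeyPath and caBasePath in the kops-controller ConfigMap above (provisioned
      # on control-plane hosts, presumably by nodeup); that is also why the nodeAffinity
      # above requires the kops.k8s.io/kops-controller-pki label alongside the
      # node-role.kubernetes.io/control-plane or node-role.kubernetes.io/master label.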
volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000..0cde75e --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api \ No newline 
at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content new file mode 100644 index 0000000..86d68c7 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content @@ -0,0 +1,52 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: leader-migration.rbac.addons.k8s.io + name: system::leader-locking-migration + namespace: kube-system +rules: +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-provider-extraction-migration + resources: + - leases + verbs: + - create + - list + - get + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: leader-migration.rbac.addons.k8s.io + name: system::leader-locking-migration + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: system::leader-locking-migration +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:kube-controller-manager +- kind: ServiceAccount + name: kube-controller-manager + namespace: kube-system +- kind: ServiceAccount + name: aws-cloud-controller-manager + namespace: kube-system +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000..502c682 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content new file mode 100644 index 0000000..f86f4e8 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content @@ -0,0 +1,4778 @@ +apiVersion: v1 +data: + calico_backend: bird + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "assign_ipv4": "true", + "assign_ipv6": "false", + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": 
"__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + typha_service_name: none + veth_mtu: "0" +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-config + namespace: kube-system + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. 
When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. 
+ properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. + format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. 
If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. 
+ type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. 
+ type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. 
+ type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. 
+ type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. 
[Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all interfaces with + BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled or Strict. [Default: Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing intepreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. 
+ type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix''s (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s]' + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. 
By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + floatingIPs: + default: Disabled + description: FloatingIPs configures whether or not Felix will program + floating IP addresses. + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. 
The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. 
[Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' 
+ type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. 
[Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for VXLAN networking. Optional as Felix determines + this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. 
[Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. [Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. 
Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. 
This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. 
Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. 
This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. 
+ type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field.
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. 
+ If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:<hostname>". If not set, this block + is not affine to a host. + type: string + allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + nullable: true + type: integer + type: array + attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: Deleted is an internal boolean used to work around a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block.
+ items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' 
+ type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. 
[Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. 
[Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. 
\n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. 
If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: <path>: which matches + the path exactly or prefix: <path-prefix>: which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. 
Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". 
+ \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: <path>: which matches + the path exactly or prefix: <path-prefix>: which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). 
\n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. 
When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ipreservations + verbs: + - list +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ippools + verbs: + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete +- apiGroups: + - crd.projectcalico.org + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - kubecontrollersconfigurations + verbs: + - get + - create + - update + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - watch + - list +- apiGroups: + - "" + resources: + - endpoints + - services + verbs: + - watch + - list + - get +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - watch + - list +- apiGroups: + - "" + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - patch +- apiGroups: + - "" + resourceNames: + - calico-node + resources: + - serviceaccounts/token + verbs: + - create +- apiGroups: + - crd.projectcalico.org + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update +- apiGroups: + - crd.projectcalico.org + resources: + 
- caliconodestatuses + verbs: + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete +- apiGroups: + - crd.projectcalico.org + resources: + - ipamconfigs + verbs: + - get +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + verbs: + - watch +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-node + role.kubernetes.io/networking: "1" + name: calico-node + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + creationTimestamp: null + labels: + k8s-app: calico-node + kops.k8s.io/managed-by: kops + spec: + containers: + - env: + - name: DATASTORE_TYPE + value: kubernetes + - name: WAIT_FOR_DATASTORE + value: "true" + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + key: calico_backend + name: calico-config + - name: CLUSTER_TYPE + value: kops,bgp + - name: IP + value: autodetect + - name: IP6 + value: none + - name: IP_AUTODETECTION_METHOD + value: first-found + - name: IP6_AUTODETECTION_METHOD + value: none + - name: CALICO_IPV4POOL_IPIP + value: CrossSubnet + - name: CALICO_IPV4POOL_VXLAN + value: Never + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: CALICO_IPV4POOL_CIDR + value: 100.96.0.0/11 + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: ACCEPT + - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + - name: FELIX_AWSSRCDSTCHECK + value: Disable + - name: FELIX_BPFENABLED + value: "false" + - name: FELIX_BPFEXTERNALSERVICEMODE + value: Tunnel + - name: FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED + value: "false" + - name: FELIX_BPFLOGLEVEL + value: "Off" + - name: FELIX_CHAININSERTMODE + value: insert + - name: FELIX_IPTABLESBACKEND + value: Auto + - name: FELIX_LOGSEVERITYSCREEN + value: info + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "false" + - name: FELIX_PROMETHEUSMETRICSPORT + value: "9091" + - name: FELIX_PROMETHEUSGOMETRICSENABLED + value: "false" + - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED + value: "false" + - name: FELIX_WIREGUARDENABLED + value: "false" + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: 
docker.io/calico/node:v3.23.3@sha256:b356c2334729810de4781819ac7cf7cb05e49b8be9387e6bba2755df95d1cd84 + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + failureThreshold: 6 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + name: calico-node + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + resources: + requests: + cpu: 100m + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - mountPath: /var/run/nodeagent + name: policysync + - mountPath: /sys/fs/bpf + name: bpffs + - mountPath: /var/log/calico/cni + name: cni-log-dir + readOnly: true + hostNetwork: true + initContainers: + - command: + - /opt/cni/bin/calico-ipam + - -upgrade + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + key: calico_backend + name: calico-config + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: docker.io/calico/cni:v3.23.3@sha256:83db083069fc8612798feda6d9c3413f075ec44e29d302f3af0a11df1cef5823 + name: upgrade-ipam + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - command: + - /opt/cni/bin/install + env: + - name: CNI_CONF_NAME + value: 10-calico.conflist + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + key: cni_network_config + name: calico-config + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CNI_MTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: SLEEP + value: "false" + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: docker.io/calico/cni:v3.23.3@sha256:83db083069fc8612798feda6d9c3413f075ec44e29d302f3af0a11df1cef5823 + name: install-cni + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - command: + - calico-node + - -init + - -best-effort + image: docker.io/calico/node:v3.23.3@sha256:b356c2334729810de4781819ac7cf7cb05e49b8be9387e6bba2755df95d1cd84 + name: mount-bpffs + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys/fs + mountPropagation: Bidirectional + name: sys-fs + - mountPath: /var/run/calico + mountPropagation: Bidirectional + name: var-run-calico + - mountPath: /nodeproc + name: nodeproc + readOnly: true + - command: + - sh + - -c + - echo Temporary fix to avoid server side apply issues + image: busybox@sha256:20142e89dab967c01765b0aea3be4cec3a5957cc330f061e5503ef6168ae6613 + name: flexvol-driver + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: calico-node + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - 
hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /var/run/calico + name: var-run-calico + - hostPath: + path: /var/lib/calico + name: var-lib-calico + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + name: sys-fs + - hostPath: + path: /sys/fs/bpf + type: Directory + name: bpffs + - hostPath: + path: /proc + name: nodeproc + - hostPath: + path: /opt/cni/bin + name: cni-bin-dir + - hostPath: + path: /etc/cni/net.d + name: cni-net-dir + - hostPath: + path: /var/log/calico/cni + name: cni-log-dir + - hostPath: + path: /var/lib/cni/networks + name: host-local-net-dir + - hostPath: + path: /var/run/nodeagent + type: DirectoryOrCreate + name: policysync + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: calico-kube-controllers + kops.k8s.io/managed-by: kops + name: calico-kube-controllers + namespace: kube-system + spec: + containers: + - env: + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + image: docker.io/calico/kube-controllers:v3.23.3@sha256:a1773f60d4bb15cbb6d73d2da9e6ab28c36fb863263f87160bf02de3bed43991 + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + failureThreshold: 6 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + name: calico-kube-controllers + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: calico-kube-controllers + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content new 
file mode 100644 index 0000000..4e8a971 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,118 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-csi-1-21 +parameters: + encrypted: "true" + type: gp3 +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_etcd-cluster-spec-events_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000..c130130 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 3, + "etcdVersion": "3.5.4" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_etcd-cluster-spec-main_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000..c130130 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 
+1,4 @@ +{ + "memberCount": 3, + "etcdVersion": "3.5.4" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_kops-version.txt_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000..3e940eb --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.24.1 \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-etcdmanager-events_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-etcdmanager-events_content new file mode 100644 index 0000000..14b3445 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-etcdmanager-events_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142 + name: etcd-manager + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-etcdmanager-main_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-etcdmanager-main_content new file mode 100644 index 0000000..281102f --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-etcdmanager-main_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + 
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000..e6ba6f9 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.24.1@sha256:b969a40a66d7c9781b8f393c4bd1cc90828c45b0419e24bf2192be9a10fd6c44 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-a_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-a_content new file mode 100644 index 0000000..ab31b49 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-a_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 
8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-a + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-b_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-b_content new file mode 100644 index 0000000..0645b77 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-b_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - 
c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-b + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: 
registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-c_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-c_content new file mode 100644 index 0000000..b7277bb --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-data-c_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- 
+ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-c + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content new file mode 100644 index 0000000..e594dab --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + 
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-a + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content new file mode 100644 index 0000000..1171ef1 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - 
ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-b + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-a_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-a_content new file mode 100644 index 0000000..c5a273c --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-a_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 
962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-a + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- 
s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-b_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-b_content new file mode 100644 index 0000000..7f9896c --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-b_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + 
enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-b + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-c_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-c_content new file mode 100644 index 0000000..bd9d735 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-dev-process-c_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + 
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7140152701493782195543542031" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-c + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content new file mode 100644 index 0000000..79b3746 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content @@ -0,0 +1,265 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsE+G9XrtFbtxfNVKq0xV + J9N2CW7fr8zQAbCBCwOw6KaSnK6qdmlBnm6y2jua6mZpt9BsYeimXO9YQmmZH5vc + 
terv+xW9LNsR8stv8sWIpnWl2NKn+Y5tO6PCJTqaYeBWIxVZC4q5Ly0YDxa1J6Qo + blcN1TMyohiWYppsPB/FfrIImgHjH9u3BfQHKPTsq+AzO9fC72mbqm2PIFkYVvuW + XPb7KQs7eWEC2tp+RlB6qhCctlARp5mN0px1vrD/X8CzOyde8ofhpntE/8jpfQcz + qNhQ6mMhDhYhUEJV+tlq+Q/RpLtP3af77RfvCfnyxN3LRCPKGOYK5F/fENr/5hqY + OwIDAQAB + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + - be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64 + - ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 + - 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64 + - 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxIyJYeMuVeJOZ/QMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkwNDA2MzkwOVoX + DTMyMDkwMzA2MzkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJJdVBOGSht7BkQVLj + l+lEyyiTe63lIWQhDpWgWqvE2OpNHhp2bdIYSOk7+/rFczt0lc0bCvFZLCZ7gnXT + INQZLWGBWbraQPoB8letkjYgxvTvAMaxtA/5lNW+zuitAJvXVYZEVR2xVw2EQHnu + OATzRM3mnlig7I2MARmUn5gZeGuMof7Aqh1e051Dsa579mRSDQTVoP19cjTslGU3 + PsBbTx9IYJXPFJETa8BxYQv11ejT1mJIDAZ4M9bWBWZFRnPhtzQUDcqUBZmWdqkx + KcjfMXRoKZQDALfDeUOv0nEkgbzkIE04haUvbPiWKfSzzd1ILumW2nH6zzHaXGmv + fSStAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + 
A1UdDgQWBBTcw+Hd3Bl1EPbzet1X5psLukt+9TANBgkqhkiG9w0BAQsFAAOCAQEA + Es2LiPmZkRUN3VA+H0p9QiCXX5iTvWoQroGy1NCrYBx3LPK3fP2ZnmG6KNRC2rxm + gmrjhWx+eCJNS7El2ZLHmDmqekiKfmcFPnb/buVLWv0NdBHdVPdZVrLUD5tzO7UJ + TBjuGwiraovMYNLGB9YqPDjnHzL9o9QkL98G3Q3BxLwkputU77Xgot7khCDbmBAR + Ey6UAxL0E4vYF8Oz8KBwC3xBXFPUNClKafbYsKZim5bAw7VA0hFETmC7n6kmHcmo + TYkKDnepzq+wM0d52gvSMKPXx+2OjIXs0h0a5a34TmPd0qm7wj3OJAhCPL9wE3Vt + xAs2TdYn8CrGqWBeqo0hBw== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxIyJYiDj+oMvtm9MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5MDMw + NjM5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMBrFSCeEEd3fqbwfK7IRQ/m/LlVaL7EMMmDs9a2 + rrbzbHCJzHjt8oqo4whqwfL9/Ure7C1baFzEme2OxS4QK/MSJDpv/W+wKg+n5Yh3 + zl8Aj07T6vjNGITDWalIZhAO7LeraOcF+m985cIFGOHYtiAWD0Ii7hpLw5rX4xTK + XcWQ74TjfDlemJCHeDe60Lx6pZFPVqMm2NbI4DT/PtvrObq5gls7F2G2T30gJ84/ + 8O1+ZlOg6/P0God8eZPSUT/A3itTNhoxqMphOJpm7KhMA/JC2MxadOlRCUPoC5JN + ZSTt62F9hkd1fYJ2pBfUb2on495yOsRTvXVpGkh4+8LJxBsCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFO1Ee5HNrlH + mneOqWSS/C4DBwDnMA0GCSqGSIb3DQEBCwUAA4IBAQCv6jgy7DRQjitHGTgiqay5 + LCe2LtFPgksE1yVb07T7eUYqKZh8qZl8vUZWOynPBTLkCrHjPTvoS34aWtO7CZSV + oiGl6CNTU8l8srUzkgp6svVIVifBGuGX2btoju17dnzNtNIjjdr8wPaXiEYxvDOT + o1YVksVw0fZfw7G0TYfQVpAN0eiZdd6j/7AKNADkpjaAkHp0pPYNDWQO6Fa4VK5L + 0ZD+tuoWr9I28izE7cBO0lx5nvMK7W28hZh6E0tGHfkej4rx2N7dMkO3SDbi+kVG + X9tB7+bqt9lO62vqMGFWCeqS0zcmF1l+a0lN532ni7H5UeEGZ+A9R1cnPBni5JgS + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxIyJYjFsQalPe9GMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTA0MDYzOTA5WhcN + MzIwOTAzMDYzOTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5tZ6eB7tCCiTOTh8eOsh + 91Uv+2Pvq9TINuiKnFIy0jlRQ+q6M4vU03Gjf/KdNfKlHmYqDrFNeCgyuiv87G74 + 9oojSlx7NuBt2TXRgw7YetAep5B34BUMu6+PnWtE9zCNi4JSWbZlT66KyaghfpJU + 187733VPK5TRnr6zbYWHFVYigau+fm3BpfA5gKqWqaXEC0JeuHptSNnn4K8z1fRN + Ay2PUeEtPV46jazTj+P5SMjueziHBfkXQCkwfeUaXq+ALETMhjKdZlnsWOQqdz5i + c08jpXbWXo0UmFgpu4ohMfHqU34v8Umcyk1q1yTyXnSM1/DPiL/xAHjAXLf2hjIH + yQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQUCoe5yB4CMAyieDVvCN5JFGpNHmUwDQYJKoZIhvcNAQELBQADggEBAKfo + hXZIwMMaQrCCE3RUfvspCsHsPOkX4Tt6EqTRAhh2it3+8t5ECW9upcycc85UDzJR + vJs0AHk4PwYv2AVgr5rVmTlww502dIQz+JiWKTLgjyOD/fpWOchYKZMO/xHsY55O + eKyFngIlvTKcOPvrrVINm6waf54lDH+t4J4fb/8P49HC4JZupFdHWRQiFsYoSMY8 + TdNrNbMninl9jua+oUw6Tfib7iOtWZN3C1EIr5bKLHTZwGTjmhq2s4JHoew6V9My + 27yq06SiVZflTAv78J3RdCp/HT7UjsncL6U4M5rXvN7Zi6gO4E9BSw2yypvtdiWS + otB/s616SciuS4GfxB8= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxIyJYikJ+CvzSfFMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkwNDA2MzkwOVoXDTMy + MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKh9PSkmnXpG4UlVPGRbeQ7BZS + EKxKXJTTMIqhCXCrfxPVE9gKRe8Qfq9WIVURFy60Q2ot9df6VUj73MVCm7CQOJ5s + jqJVDRpcNpVANJJCElxAVzelQf0K0oyxeVL8f0bX9zYnxoddR41bBvUPz9lg/01F + GSPk1IwbDJ95I8vQD+WS4aGJ1JW7CSE2Q6VfeOdxYRwzD4yhkit/ixhQNG0tLa1r + CQyIz8/bGT49efyP5zLTRe55hAkwVZmbzGcOFcjfkd6oLb3AiuU1DuitI455wM3L + b9ds59DGyxmPMH0qoyGdK0JScZp4j4jv/wPHjafg/NVq1/v0nRlv+/mojTWHAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + 
BBTrP+8tkkcWH5BAUsADSecDgM+rbjANBgkqhkiG9w0BAQsFAAOCAQEAj6qP+x9/ + em1MUrypgL+XnvxlBmRktBWEoRz6fldaP7HIN4pNi5OBA0rVdS74BUwkTSoy3AVd + 4SSfNdTvqXVEwUfZbopZYcTuemAd5NHo9nycTO5Tse07NuqcxpQ4dTpz3K2iB50h + +GJYKx+W0IHPb/+Pq+ZPXqFcdKFjPGbtZfOuVDffyBaTHCGmkSV/cgG5Zfi3c9Ep + kvK0j8QhcJ5gahqUoum8lDRHJBscUId74qnEXZpwEx0yBk4cPxGdw1M7DnREeVNU + 98hAbdeRpgDzXoMR0yNCikTOwk/aU4OhEJUWiaLfDSvMFznG2OdNgP71afsRNRrR + CnTy7QvfVnofyg== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxIyJYeC/qJ4t7uwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkwNDA2MzkwOVoXDTMy + MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuXpCZLE//rM01hgLinOg3mLY + 3PVsZLCOZgV+KvsuXQEwj/a0E9w6v22KrxJQic4Al8ebFcfxJ4UzB0GSAKazdj3B + Q60WYIx+4/8uLNyEsR49jiCCbNHvjTYsGeC1EiXXN2h6aeJJ/L6y9YxFaArZ13Op + wZhtA+0ubPkaMYKsWdVcipJwNH5PB1v/8JogKshTwMN506XfmkGcydIl+i9yhX4s + NgwkjXgrMNlgvccswSzRn/CPqhqcOgNe0zbonL6pFBju0KC0zqyFODpnpMwrfPMC + HIxLdQpFd2zDV30mSu0/TRILhI4dYa+/gC7ucdzJiHVjE1FXpUDUgT8sIVGpAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSDAp7bV5g/3hpTUO/Ebaf3pw30yjANBgkqhkiG9w0BAQsFAAOCAQEAc1RU66y/ + JBGVeC6Xo86LFDfIYHcd9XA5J07l32NHwMvc7dv+wpG1funG1pi/ucdCh9HXzu+v + tx3QcG/a61YKJiJ2btJWNvkoqKdzkHBbr4kBcOHTWmq8XLUFBq3pVYMy7P/HZiTK + BhRDLwHE5qQO9IxjyqloMlc/WOVVrfieHIHHRg0mvAs0j6DJR1axqnKpgytV/sTy + fwnHV+RNOh8oy33/aeHfgZ0kJejRFmUC3+fTzI1onmaJXD1UHZfMElrHrvCW76eC + T+Zfllo7km3Oyje+2B4W76/q2G8nyT8rFxo9+nB6RGVGslPYLlbF0cFLCCC998HR + 5SKrimFkB4A+pg== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxIyJYfejUTVi0qSMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5 + MDMwNjM5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOUHtUkjT+GYZDQQlIo++9JgKrI+eHjY + WeUH6IREmQYGGJCPkWxWI0DaB1glglMlJU4hTa1BHhnu+Vlzj3vOx6G9EiatRBRa + CEcZiSEnc4Tvr91lQeRSSApZ76CnL/7Tua74sy3YKGgmjlfN5I6gQBVvXs9JYCph + IWakWb5e3+5VrUm4cfH8fLB+7RnGe+uVG5UCE5yQ5Z2KsvYSJWe/NmDpWCn1tKAp + snnmsCHbeEb5OARTEFAXqxRSFRiCyzbDdFMvGKU+SOQfXf3EKeZ5GybfZib9Oe3c + 0IkqcImxloZafpnpqGeH+YzAKrG+54LcQQ0nxH0/uO/89mIE1acSTyUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFww + +ykQMnt4PEIJrJezPIlpmYzBMA0GCSqGSIb3DQEBCwUAA4IBAQBzY4BuomR8pAAs + rkDW3pkZaNXi+EZl6FS7k0E9RR2eQrm5BzwWB1zXcuLLXab7yL0HNPwQr9whXTrV + RXaWZTGTSlFbKom9rEL6Lx1w+dnjT+V+irWhevfXh1MEC8S9Hpi5/kWr7Cov+Pf0 + 3nuTgKc1ZtzkT3+whDVCispuwTVPme6x7x1nR2fMgzW/9kfNe9wx1pD4K1uHmQ1R + WcR1tkAoLK6CPaUmHU5jUh8HFcl1V/vXycKr1R8lzvcv9gDXbgh/3kohZazzeBBW + SfA7verwMTrVGgia/+m57N3F5l3BwGM8rj5ncFynqZPE2GSdVrK4xMnkhVcq/wC+ + X0c+UsfH + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + 
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7140152701265059592804081616" + etcd-clients-ca: "7140152701334538361835018685" + etcd-manager-ca-events: "7140152701353152116999188294" + etcd-manager-ca-main: "7140152701343712646643132357" + etcd-peers-ca-events: "7140152701262321031184890800" + etcd-peers-ca-main: "7140152701288092082058775186" + kubernetes-ca: "7140152701493782195543542031" + service-account: "7140152701518733293461068249" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2a + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content new file mode 100644 index 0000000..fd515ca --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content @@ -0,0 +1,265 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: 
registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsE+G9XrtFbtxfNVKq0xV + J9N2CW7fr8zQAbCBCwOw6KaSnK6qdmlBnm6y2jua6mZpt9BsYeimXO9YQmmZH5vc + terv+xW9LNsR8stv8sWIpnWl2NKn+Y5tO6PCJTqaYeBWIxVZC4q5Ly0YDxa1J6Qo + blcN1TMyohiWYppsPB/FfrIImgHjH9u3BfQHKPTsq+AzO9fC72mbqm2PIFkYVvuW + XPb7KQs7eWEC2tp+RlB6qhCctlARp5mN0px1vrD/X8CzOyde8ofhpntE/8jpfQcz + qNhQ6mMhDhYhUEJV+tlq+Q/RpLtP3af77RfvCfnyxN3LRCPKGOYK5F/fENr/5hqY + OwIDAQAB + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + - be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64 + - ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 + - 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64 + - 
7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxIyJYeMuVeJOZ/QMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkwNDA2MzkwOVoX + DTMyMDkwMzA2MzkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJJdVBOGSht7BkQVLj + l+lEyyiTe63lIWQhDpWgWqvE2OpNHhp2bdIYSOk7+/rFczt0lc0bCvFZLCZ7gnXT + INQZLWGBWbraQPoB8letkjYgxvTvAMaxtA/5lNW+zuitAJvXVYZEVR2xVw2EQHnu + OATzRM3mnlig7I2MARmUn5gZeGuMof7Aqh1e051Dsa579mRSDQTVoP19cjTslGU3 + PsBbTx9IYJXPFJETa8BxYQv11ejT1mJIDAZ4M9bWBWZFRnPhtzQUDcqUBZmWdqkx + KcjfMXRoKZQDALfDeUOv0nEkgbzkIE04haUvbPiWKfSzzd1ILumW2nH6zzHaXGmv + fSStAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBTcw+Hd3Bl1EPbzet1X5psLukt+9TANBgkqhkiG9w0BAQsFAAOCAQEA + Es2LiPmZkRUN3VA+H0p9QiCXX5iTvWoQroGy1NCrYBx3LPK3fP2ZnmG6KNRC2rxm + gmrjhWx+eCJNS7El2ZLHmDmqekiKfmcFPnb/buVLWv0NdBHdVPdZVrLUD5tzO7UJ + TBjuGwiraovMYNLGB9YqPDjnHzL9o9QkL98G3Q3BxLwkputU77Xgot7khCDbmBAR + Ey6UAxL0E4vYF8Oz8KBwC3xBXFPUNClKafbYsKZim5bAw7VA0hFETmC7n6kmHcmo + TYkKDnepzq+wM0d52gvSMKPXx+2OjIXs0h0a5a34TmPd0qm7wj3OJAhCPL9wE3Vt + xAs2TdYn8CrGqWBeqo0hBw== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxIyJYiDj+oMvtm9MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5MDMw + NjM5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMBrFSCeEEd3fqbwfK7IRQ/m/LlVaL7EMMmDs9a2 + rrbzbHCJzHjt8oqo4whqwfL9/Ure7C1baFzEme2OxS4QK/MSJDpv/W+wKg+n5Yh3 + zl8Aj07T6vjNGITDWalIZhAO7LeraOcF+m985cIFGOHYtiAWD0Ii7hpLw5rX4xTK + XcWQ74TjfDlemJCHeDe60Lx6pZFPVqMm2NbI4DT/PtvrObq5gls7F2G2T30gJ84/ + 8O1+ZlOg6/P0God8eZPSUT/A3itTNhoxqMphOJpm7KhMA/JC2MxadOlRCUPoC5JN + ZSTt62F9hkd1fYJ2pBfUb2on495yOsRTvXVpGkh4+8LJxBsCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFO1Ee5HNrlH + mneOqWSS/C4DBwDnMA0GCSqGSIb3DQEBCwUAA4IBAQCv6jgy7DRQjitHGTgiqay5 + LCe2LtFPgksE1yVb07T7eUYqKZh8qZl8vUZWOynPBTLkCrHjPTvoS34aWtO7CZSV + oiGl6CNTU8l8srUzkgp6svVIVifBGuGX2btoju17dnzNtNIjjdr8wPaXiEYxvDOT + o1YVksVw0fZfw7G0TYfQVpAN0eiZdd6j/7AKNADkpjaAkHp0pPYNDWQO6Fa4VK5L + 0ZD+tuoWr9I28izE7cBO0lx5nvMK7W28hZh6E0tGHfkej4rx2N7dMkO3SDbi+kVG + X9tB7+bqt9lO62vqMGFWCeqS0zcmF1l+a0lN532ni7H5UeEGZ+A9R1cnPBni5JgS + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxIyJYjFsQalPe9GMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTA0MDYzOTA5WhcN + MzIwOTAzMDYzOTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5tZ6eB7tCCiTOTh8eOsh + 91Uv+2Pvq9TINuiKnFIy0jlRQ+q6M4vU03Gjf/KdNfKlHmYqDrFNeCgyuiv87G74 + 9oojSlx7NuBt2TXRgw7YetAep5B34BUMu6+PnWtE9zCNi4JSWbZlT66KyaghfpJU + 187733VPK5TRnr6zbYWHFVYigau+fm3BpfA5gKqWqaXEC0JeuHptSNnn4K8z1fRN + Ay2PUeEtPV46jazTj+P5SMjueziHBfkXQCkwfeUaXq+ALETMhjKdZlnsWOQqdz5i + c08jpXbWXo0UmFgpu4ohMfHqU34v8Umcyk1q1yTyXnSM1/DPiL/xAHjAXLf2hjIH + yQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQUCoe5yB4CMAyieDVvCN5JFGpNHmUwDQYJKoZIhvcNAQELBQADggEBAKfo + hXZIwMMaQrCCE3RUfvspCsHsPOkX4Tt6EqTRAhh2it3+8t5ECW9upcycc85UDzJR + vJs0AHk4PwYv2AVgr5rVmTlww502dIQz+JiWKTLgjyOD/fpWOchYKZMO/xHsY55O + eKyFngIlvTKcOPvrrVINm6waf54lDH+t4J4fb/8P49HC4JZupFdHWRQiFsYoSMY8 + 
TdNrNbMninl9jua+oUw6Tfib7iOtWZN3C1EIr5bKLHTZwGTjmhq2s4JHoew6V9My + 27yq06SiVZflTAv78J3RdCp/HT7UjsncL6U4M5rXvN7Zi6gO4E9BSw2yypvtdiWS + otB/s616SciuS4GfxB8= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxIyJYikJ+CvzSfFMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkwNDA2MzkwOVoXDTMy + MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKh9PSkmnXpG4UlVPGRbeQ7BZS + EKxKXJTTMIqhCXCrfxPVE9gKRe8Qfq9WIVURFy60Q2ot9df6VUj73MVCm7CQOJ5s + jqJVDRpcNpVANJJCElxAVzelQf0K0oyxeVL8f0bX9zYnxoddR41bBvUPz9lg/01F + GSPk1IwbDJ95I8vQD+WS4aGJ1JW7CSE2Q6VfeOdxYRwzD4yhkit/ixhQNG0tLa1r + CQyIz8/bGT49efyP5zLTRe55hAkwVZmbzGcOFcjfkd6oLb3AiuU1DuitI455wM3L + b9ds59DGyxmPMH0qoyGdK0JScZp4j4jv/wPHjafg/NVq1/v0nRlv+/mojTWHAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBTrP+8tkkcWH5BAUsADSecDgM+rbjANBgkqhkiG9w0BAQsFAAOCAQEAj6qP+x9/ + em1MUrypgL+XnvxlBmRktBWEoRz6fldaP7HIN4pNi5OBA0rVdS74BUwkTSoy3AVd + 4SSfNdTvqXVEwUfZbopZYcTuemAd5NHo9nycTO5Tse07NuqcxpQ4dTpz3K2iB50h + +GJYKx+W0IHPb/+Pq+ZPXqFcdKFjPGbtZfOuVDffyBaTHCGmkSV/cgG5Zfi3c9Ep + kvK0j8QhcJ5gahqUoum8lDRHJBscUId74qnEXZpwEx0yBk4cPxGdw1M7DnREeVNU + 98hAbdeRpgDzXoMR0yNCikTOwk/aU4OhEJUWiaLfDSvMFznG2OdNgP71afsRNRrR + CnTy7QvfVnofyg== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxIyJYeC/qJ4t7uwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkwNDA2MzkwOVoXDTMy + MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuXpCZLE//rM01hgLinOg3mLY + 3PVsZLCOZgV+KvsuXQEwj/a0E9w6v22KrxJQic4Al8ebFcfxJ4UzB0GSAKazdj3B + Q60WYIx+4/8uLNyEsR49jiCCbNHvjTYsGeC1EiXXN2h6aeJJ/L6y9YxFaArZ13Op + wZhtA+0ubPkaMYKsWdVcipJwNH5PB1v/8JogKshTwMN506XfmkGcydIl+i9yhX4s + NgwkjXgrMNlgvccswSzRn/CPqhqcOgNe0zbonL6pFBju0KC0zqyFODpnpMwrfPMC + HIxLdQpFd2zDV30mSu0/TRILhI4dYa+/gC7ucdzJiHVjE1FXpUDUgT8sIVGpAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSDAp7bV5g/3hpTUO/Ebaf3pw30yjANBgkqhkiG9w0BAQsFAAOCAQEAc1RU66y/ + JBGVeC6Xo86LFDfIYHcd9XA5J07l32NHwMvc7dv+wpG1funG1pi/ucdCh9HXzu+v + tx3QcG/a61YKJiJ2btJWNvkoqKdzkHBbr4kBcOHTWmq8XLUFBq3pVYMy7P/HZiTK + BhRDLwHE5qQO9IxjyqloMlc/WOVVrfieHIHHRg0mvAs0j6DJR1axqnKpgytV/sTy + fwnHV+RNOh8oy33/aeHfgZ0kJejRFmUC3+fTzI1onmaJXD1UHZfMElrHrvCW76eC + T+Zfllo7km3Oyje+2B4W76/q2G8nyT8rFxo9+nB6RGVGslPYLlbF0cFLCCC998HR + 5SKrimFkB4A+pg== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxIyJYfejUTVi0qSMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5 + MDMwNjM5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOUHtUkjT+GYZDQQlIo++9JgKrI+eHjY + WeUH6IREmQYGGJCPkWxWI0DaB1glglMlJU4hTa1BHhnu+Vlzj3vOx6G9EiatRBRa + CEcZiSEnc4Tvr91lQeRSSApZ76CnL/7Tua74sy3YKGgmjlfN5I6gQBVvXs9JYCph + IWakWb5e3+5VrUm4cfH8fLB+7RnGe+uVG5UCE5yQ5Z2KsvYSJWe/NmDpWCn1tKAp + snnmsCHbeEb5OARTEFAXqxRSFRiCyzbDdFMvGKU+SOQfXf3EKeZ5GybfZib9Oe3c + 0IkqcImxloZafpnpqGeH+YzAKrG+54LcQQ0nxH0/uO/89mIE1acSTyUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFww + +ykQMnt4PEIJrJezPIlpmYzBMA0GCSqGSIb3DQEBCwUAA4IBAQBzY4BuomR8pAAs + rkDW3pkZaNXi+EZl6FS7k0E9RR2eQrm5BzwWB1zXcuLLXab7yL0HNPwQr9whXTrV + RXaWZTGTSlFbKom9rEL6Lx1w+dnjT+V+irWhevfXh1MEC8S9Hpi5/kWr7Cov+Pf0 + 3nuTgKc1ZtzkT3+whDVCispuwTVPme6x7x1nR2fMgzW/9kfNe9wx1pD4K1uHmQ1R + 
WcR1tkAoLK6CPaUmHU5jUh8HFcl1V/vXycKr1R8lzvcv9gDXbgh/3kohZazzeBBW + SfA7verwMTrVGgia/+m57N3F5l3BwGM8rj5ncFynqZPE2GSdVrK4xMnkhVcq/wC+ + X0c+UsfH + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7140152701265059592804081616" + etcd-clients-ca: "7140152701334538361835018685" + etcd-manager-ca-events: "7140152701353152116999188294" + etcd-manager-ca-main: "7140152701343712646643132357" + etcd-peers-ca-events: "7140152701262321031184890800" + etcd-peers-ca-main: "7140152701288092082058775186" + kubernetes-ca: "7140152701493782195543542031" + service-account: "7140152701518733293461068249" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2b + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content new file mode 100644 index 
0000000..1a8e485 --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content @@ -0,0 +1,265 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsE+G9XrtFbtxfNVKq0xV + J9N2CW7fr8zQAbCBCwOw6KaSnK6qdmlBnm6y2jua6mZpt9BsYeimXO9YQmmZH5vc + terv+xW9LNsR8stv8sWIpnWl2NKn+Y5tO6PCJTqaYeBWIxVZC4q5Ly0YDxa1J6Qo + blcN1TMyohiWYppsPB/FfrIImgHjH9u3BfQHKPTsq+AzO9fC72mbqm2PIFkYVvuW + XPb7KQs7eWEC2tp+RlB6qhCctlARp5mN0px1vrD/X8CzOyde8ofhpntE/8jpfQcz + qNhQ6mMhDhYhUEJV+tlq+Q/RpLtP3af77RfvCfnyxN3LRCPKGOYK5F/fENr/5hqY + OwIDAQAB + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + - be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64 + - ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - 
ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 + - 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64 + - 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxIyJYeMuVeJOZ/QMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkwNDA2MzkwOVoX + DTMyMDkwMzA2MzkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJJdVBOGSht7BkQVLj + l+lEyyiTe63lIWQhDpWgWqvE2OpNHhp2bdIYSOk7+/rFczt0lc0bCvFZLCZ7gnXT + INQZLWGBWbraQPoB8letkjYgxvTvAMaxtA/5lNW+zuitAJvXVYZEVR2xVw2EQHnu + OATzRM3mnlig7I2MARmUn5gZeGuMof7Aqh1e051Dsa579mRSDQTVoP19cjTslGU3 + PsBbTx9IYJXPFJETa8BxYQv11ejT1mJIDAZ4M9bWBWZFRnPhtzQUDcqUBZmWdqkx + KcjfMXRoKZQDALfDeUOv0nEkgbzkIE04haUvbPiWKfSzzd1ILumW2nH6zzHaXGmv + fSStAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBTcw+Hd3Bl1EPbzet1X5psLukt+9TANBgkqhkiG9w0BAQsFAAOCAQEA + Es2LiPmZkRUN3VA+H0p9QiCXX5iTvWoQroGy1NCrYBx3LPK3fP2ZnmG6KNRC2rxm + gmrjhWx+eCJNS7El2ZLHmDmqekiKfmcFPnb/buVLWv0NdBHdVPdZVrLUD5tzO7UJ + TBjuGwiraovMYNLGB9YqPDjnHzL9o9QkL98G3Q3BxLwkputU77Xgot7khCDbmBAR + Ey6UAxL0E4vYF8Oz8KBwC3xBXFPUNClKafbYsKZim5bAw7VA0hFETmC7n6kmHcmo + TYkKDnepzq+wM0d52gvSMKPXx+2OjIXs0h0a5a34TmPd0qm7wj3OJAhCPL9wE3Vt + xAs2TdYn8CrGqWBeqo0hBw== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxIyJYiDj+oMvtm9MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5MDMw + NjM5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMBrFSCeEEd3fqbwfK7IRQ/m/LlVaL7EMMmDs9a2 + rrbzbHCJzHjt8oqo4whqwfL9/Ure7C1baFzEme2OxS4QK/MSJDpv/W+wKg+n5Yh3 + zl8Aj07T6vjNGITDWalIZhAO7LeraOcF+m985cIFGOHYtiAWD0Ii7hpLw5rX4xTK + XcWQ74TjfDlemJCHeDe60Lx6pZFPVqMm2NbI4DT/PtvrObq5gls7F2G2T30gJ84/ + 8O1+ZlOg6/P0God8eZPSUT/A3itTNhoxqMphOJpm7KhMA/JC2MxadOlRCUPoC5JN + ZSTt62F9hkd1fYJ2pBfUb2on495yOsRTvXVpGkh4+8LJxBsCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFO1Ee5HNrlH + mneOqWSS/C4DBwDnMA0GCSqGSIb3DQEBCwUAA4IBAQCv6jgy7DRQjitHGTgiqay5 + LCe2LtFPgksE1yVb07T7eUYqKZh8qZl8vUZWOynPBTLkCrHjPTvoS34aWtO7CZSV + oiGl6CNTU8l8srUzkgp6svVIVifBGuGX2btoju17dnzNtNIjjdr8wPaXiEYxvDOT + o1YVksVw0fZfw7G0TYfQVpAN0eiZdd6j/7AKNADkpjaAkHp0pPYNDWQO6Fa4VK5L + 0ZD+tuoWr9I28izE7cBO0lx5nvMK7W28hZh6E0tGHfkej4rx2N7dMkO3SDbi+kVG + X9tB7+bqt9lO62vqMGFWCeqS0zcmF1l+a0lN532ni7H5UeEGZ+A9R1cnPBni5JgS + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxIyJYjFsQalPe9GMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTA0MDYzOTA5WhcN + MzIwOTAzMDYzOTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + 
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5tZ6eB7tCCiTOTh8eOsh + 91Uv+2Pvq9TINuiKnFIy0jlRQ+q6M4vU03Gjf/KdNfKlHmYqDrFNeCgyuiv87G74 + 9oojSlx7NuBt2TXRgw7YetAep5B34BUMu6+PnWtE9zCNi4JSWbZlT66KyaghfpJU + 187733VPK5TRnr6zbYWHFVYigau+fm3BpfA5gKqWqaXEC0JeuHptSNnn4K8z1fRN + Ay2PUeEtPV46jazTj+P5SMjueziHBfkXQCkwfeUaXq+ALETMhjKdZlnsWOQqdz5i + c08jpXbWXo0UmFgpu4ohMfHqU34v8Umcyk1q1yTyXnSM1/DPiL/xAHjAXLf2hjIH + yQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQUCoe5yB4CMAyieDVvCN5JFGpNHmUwDQYJKoZIhvcNAQELBQADggEBAKfo + hXZIwMMaQrCCE3RUfvspCsHsPOkX4Tt6EqTRAhh2it3+8t5ECW9upcycc85UDzJR + vJs0AHk4PwYv2AVgr5rVmTlww502dIQz+JiWKTLgjyOD/fpWOchYKZMO/xHsY55O + eKyFngIlvTKcOPvrrVINm6waf54lDH+t4J4fb/8P49HC4JZupFdHWRQiFsYoSMY8 + TdNrNbMninl9jua+oUw6Tfib7iOtWZN3C1EIr5bKLHTZwGTjmhq2s4JHoew6V9My + 27yq06SiVZflTAv78J3RdCp/HT7UjsncL6U4M5rXvN7Zi6gO4E9BSw2yypvtdiWS + otB/s616SciuS4GfxB8= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxIyJYikJ+CvzSfFMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkwNDA2MzkwOVoXDTMy + MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKh9PSkmnXpG4UlVPGRbeQ7BZS + EKxKXJTTMIqhCXCrfxPVE9gKRe8Qfq9WIVURFy60Q2ot9df6VUj73MVCm7CQOJ5s + jqJVDRpcNpVANJJCElxAVzelQf0K0oyxeVL8f0bX9zYnxoddR41bBvUPz9lg/01F + GSPk1IwbDJ95I8vQD+WS4aGJ1JW7CSE2Q6VfeOdxYRwzD4yhkit/ixhQNG0tLa1r + CQyIz8/bGT49efyP5zLTRe55hAkwVZmbzGcOFcjfkd6oLb3AiuU1DuitI455wM3L + b9ds59DGyxmPMH0qoyGdK0JScZp4j4jv/wPHjafg/NVq1/v0nRlv+/mojTWHAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBTrP+8tkkcWH5BAUsADSecDgM+rbjANBgkqhkiG9w0BAQsFAAOCAQEAj6qP+x9/ + em1MUrypgL+XnvxlBmRktBWEoRz6fldaP7HIN4pNi5OBA0rVdS74BUwkTSoy3AVd + 4SSfNdTvqXVEwUfZbopZYcTuemAd5NHo9nycTO5Tse07NuqcxpQ4dTpz3K2iB50h + +GJYKx+W0IHPb/+Pq+ZPXqFcdKFjPGbtZfOuVDffyBaTHCGmkSV/cgG5Zfi3c9Ep + kvK0j8QhcJ5gahqUoum8lDRHJBscUId74qnEXZpwEx0yBk4cPxGdw1M7DnREeVNU + 98hAbdeRpgDzXoMR0yNCikTOwk/aU4OhEJUWiaLfDSvMFznG2OdNgP71afsRNRrR + CnTy7QvfVnofyg== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxIyJYeC/qJ4t7uwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkwNDA2MzkwOVoXDTMy + MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuXpCZLE//rM01hgLinOg3mLY + 3PVsZLCOZgV+KvsuXQEwj/a0E9w6v22KrxJQic4Al8ebFcfxJ4UzB0GSAKazdj3B + Q60WYIx+4/8uLNyEsR49jiCCbNHvjTYsGeC1EiXXN2h6aeJJ/L6y9YxFaArZ13Op + wZhtA+0ubPkaMYKsWdVcipJwNH5PB1v/8JogKshTwMN506XfmkGcydIl+i9yhX4s + NgwkjXgrMNlgvccswSzRn/CPqhqcOgNe0zbonL6pFBju0KC0zqyFODpnpMwrfPMC + HIxLdQpFd2zDV30mSu0/TRILhI4dYa+/gC7ucdzJiHVjE1FXpUDUgT8sIVGpAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSDAp7bV5g/3hpTUO/Ebaf3pw30yjANBgkqhkiG9w0BAQsFAAOCAQEAc1RU66y/ + JBGVeC6Xo86LFDfIYHcd9XA5J07l32NHwMvc7dv+wpG1funG1pi/ucdCh9HXzu+v + tx3QcG/a61YKJiJ2btJWNvkoqKdzkHBbr4kBcOHTWmq8XLUFBq3pVYMy7P/HZiTK + BhRDLwHE5qQO9IxjyqloMlc/WOVVrfieHIHHRg0mvAs0j6DJR1axqnKpgytV/sTy + fwnHV+RNOh8oy33/aeHfgZ0kJejRFmUC3+fTzI1onmaJXD1UHZfMElrHrvCW76eC + T+Zfllo7km3Oyje+2B4W76/q2G8nyT8rFxo9+nB6RGVGslPYLlbF0cFLCCC998HR + 5SKrimFkB4A+pg== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxIyJYfejUTVi0qSMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5 + MDMwNjM5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + 
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOUHtUkjT+GYZDQQlIo++9JgKrI+eHjY + WeUH6IREmQYGGJCPkWxWI0DaB1glglMlJU4hTa1BHhnu+Vlzj3vOx6G9EiatRBRa + CEcZiSEnc4Tvr91lQeRSSApZ76CnL/7Tua74sy3YKGgmjlfN5I6gQBVvXs9JYCph + IWakWb5e3+5VrUm4cfH8fLB+7RnGe+uVG5UCE5yQ5Z2KsvYSJWe/NmDpWCn1tKAp + snnmsCHbeEb5OARTEFAXqxRSFRiCyzbDdFMvGKU+SOQfXf3EKeZ5GybfZib9Oe3c + 0IkqcImxloZafpnpqGeH+YzAKrG+54LcQQ0nxH0/uO/89mIE1acSTyUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFww + +ykQMnt4PEIJrJezPIlpmYzBMA0GCSqGSIb3DQEBCwUAA4IBAQBzY4BuomR8pAAs + rkDW3pkZaNXi+EZl6FS7k0E9RR2eQrm5BzwWB1zXcuLLXab7yL0HNPwQr9whXTrV + RXaWZTGTSlFbKom9rEL6Lx1w+dnjT+V+irWhevfXh1MEC8S9Hpi5/kWr7Cov+Pf0 + 3nuTgKc1ZtzkT3+whDVCispuwTVPme6x7x1nR2fMgzW/9kfNe9wx1pD4K1uHmQ1R + WcR1tkAoLK6CPaUmHU5jUh8HFcl1V/vXycKr1R8lzvcv9gDXbgh/3kohZazzeBBW + SfA7verwMTrVGgia/+m57N3F5l3BwGM8rj5ncFynqZPE2GSdVrK4xMnkhVcq/wC+ + X0c+UsfH + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/ + qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb + Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9 + m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+ + ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI + 8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn + aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH + WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs + 6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc + ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ + yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J + 5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7140152701265059592804081616" + etcd-clients-ca: "7140152701334538361835018685" + etcd-manager-ca-events: "7140152701353152116999188294" + etcd-manager-ca-main: "7140152701343712646643132357" + etcd-peers-ca-events: "7140152701262321031184890800" + etcd-peers-ca-main: "7140152701288092082058775186" + kubernetes-ca: "7140152701493782195543542031" + service-account: "7140152701518733293461068249" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2c + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s 
+ shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/terraform/tf-kops-dev-20200907-ip/kubernetes.tf b/terraform/tf-kops-dev-20200907-ip/kubernetes.tf new file mode 100644 index 0000000..a8a551c --- /dev/null +++ b/terraform/tf-kops-dev-20200907-ip/kubernetes.tf @@ -0,0 +1,2358 @@ +locals { + cluster_name = "dev.datasaker.io" + master_autoscaling_group_ids = [aws_autoscaling_group.master-ap-northeast-2a-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2b-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2c-masters-dev-datasaker-io.id] + master_security_group_ids = [aws_security_group.masters-dev-datasaker-io.id] + masters_role_arn = aws_iam_role.masters-dev-datasaker-io.arn + masters_role_name = aws_iam_role.masters-dev-datasaker-io.name + node_autoscaling_group_ids = [aws_autoscaling_group.dev-data-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-c-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-a-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-a-dev-datasaker-io.id, aws_autoscaling_group.dev-process-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-c-dev-datasaker-io.id] + node_security_group_ids = [aws_security_group.nodes-dev-datasaker-io.id] + node_subnet_ids = ["subnet-021536c4f12971c74", "subnet-0ae3ab7ae241fe761", "subnet-0c90842daa15aa7c7"] + nodes_role_arn = aws_iam_role.nodes-dev-datasaker-io.arn + nodes_role_name = aws_iam_role.nodes-dev-datasaker-io.name + region = "ap-northeast-2" + subnet_ap-northeast-2a_id = "subnet-021536c4f12971c74" + subnet_ap-northeast-2b_id = "subnet-0c90842daa15aa7c7" + subnet_ap-northeast-2c_id = "subnet-0ae3ab7ae241fe761" + subnet_ids = ["subnet-021536c4f12971c74", "subnet-05b9f4f02955c3307", "subnet-0ae3ab7ae241fe761", "subnet-0b4f418020349fb84", "subnet-0c90842daa15aa7c7", "subnet-0d762a41fb41d63e5"] + subnet_utility-ap-northeast-2a_id = "subnet-0d762a41fb41d63e5" + subnet_utility-ap-northeast-2b_id = "subnet-0b4f418020349fb84" + subnet_utility-ap-northeast-2c_id = "subnet-05b9f4f02955c3307" + vpc_id = "vpc-03cbb88e181ccb46e" +} + +output "cluster_name" { + value = "dev.datasaker.io" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-ap-northeast-2a-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2b-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2c-masters-dev-datasaker-io.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-dev-datasaker-io.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-dev-datasaker-io.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-dev-datasaker-io.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.dev-data-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-c-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-a-dev-datasaker-io.id, 
aws_autoscaling_group.dev-mgmt-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-a-dev-datasaker-io.id, aws_autoscaling_group.dev-process-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-c-dev-datasaker-io.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-dev-datasaker-io.id] +} + +output "node_subnet_ids" { + value = ["subnet-021536c4f12971c74", "subnet-0ae3ab7ae241fe761", "subnet-0c90842daa15aa7c7"] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-dev-datasaker-io.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-dev-datasaker-io.name +} + +output "region" { + value = "ap-northeast-2" +} + +output "subnet_ap-northeast-2a_id" { + value = "subnet-021536c4f12971c74" +} + +output "subnet_ap-northeast-2b_id" { + value = "subnet-0c90842daa15aa7c7" +} + +output "subnet_ap-northeast-2c_id" { + value = "subnet-0ae3ab7ae241fe761" +} + +output "subnet_ids" { + value = ["subnet-021536c4f12971c74", "subnet-05b9f4f02955c3307", "subnet-0ae3ab7ae241fe761", "subnet-0b4f418020349fb84", "subnet-0c90842daa15aa7c7", "subnet-0d762a41fb41d63e5"] +} + +output "subnet_utility-ap-northeast-2a_id" { + value = "subnet-0d762a41fb41d63e5" +} + +output "subnet_utility-ap-northeast-2b_id" { + value = "subnet-0b4f418020349fb84" +} + +output "subnet_utility-ap-northeast-2c_id" { + value = "subnet-05b9f4f02955c3307" +} + +output "vpc_id" { + value = "vpc-03cbb88e181ccb46e" +} + +provider "aws" { + region = "ap-northeast-2" +} + +provider "aws" { + alias = "files" + region = "ap-northeast-2" +} + +resource "aws_autoscaling_group" "dev-data-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-a-dev-datasaker-io.id + version = aws_launch_template.dev-data-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-021536c4f12971c74"] +} + +resource "aws_autoscaling_group" "dev-data-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = 
aws_launch_template.dev-data-b-dev-datasaker-io.id + version = aws_launch_template.dev-data-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c90842daa15aa7c7"] +} + +resource "aws_autoscaling_group" "dev-data-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-c-dev-datasaker-io.id + version = aws_launch_template.dev-data-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-c.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0ae3ab7ae241fe761"] +} + +resource "aws_autoscaling_group" "dev-mgmt-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-mgmt-a-dev-datasaker-io.id + version = aws_launch_template.dev-mgmt-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + 
metrics_granularity = "1Minute" + min_size = 1 + name = "dev-mgmt-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-mgmt-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "mgmt" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-021536c4f12971c74"] +} + +resource "aws_autoscaling_group" "dev-mgmt-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-mgmt-b-dev-datasaker-io.id + version = aws_launch_template.dev-mgmt-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-mgmt-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-mgmt-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "mgmt" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c90842daa15aa7c7"] +} + +resource "aws_autoscaling_group" "dev-process-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-a-dev-datasaker-io.id + version = aws_launch_template.dev-process-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = 
true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-021536c4f12971c74"] +} + +resource "aws_autoscaling_group" "dev-process-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-b-dev-datasaker-io.id + version = aws_launch_template.dev-process-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c90842daa15aa7c7"] +} + +resource "aws_autoscaling_group" "dev-process-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-c-dev-datasaker-io.id + version = aws_launch_template.dev-process-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-c.dev.datasaker.io" + } + tag 
{ + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0ae3ab7ae241fe761"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2a-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2a-masters-dev-datasaker-io.id + version = aws_launch_template.master-ap-northeast-2a-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2a.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2a.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-021536c4f12971c74"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2b-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2b-masters-dev-datasaker-io.id + version = 
aws_launch_template.master-ap-northeast-2b-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2b.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2b.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c90842daa15aa7c7"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2c-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2c-masters-dev-datasaker-io.id + version = aws_launch_template.master-ap-northeast-2c-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2c.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2c.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0ae3ab7ae241fe761"] +} + +resource "aws_ebs_volume" "a-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2a" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "a.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "a-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2a" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "a.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "b-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2b" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "b.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "b-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2b" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "b.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "c-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2c" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "c.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "c-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2c" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "c.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_elb" "api-dev-datasaker-io" { + connection_draining = true + connection_draining_timeout = 300 + cross_zone_load_balancing = false + health_check { + healthy_threshold = 2 + interval = 10 + target = "SSL:443" + timeout = 5 + unhealthy_threshold = 2 + } + idle_timeout = 300 + listener { + instance_port = 443 + instance_protocol = "TCP" + lb_port = 443 + lb_protocol = "TCP" + } + name = "api-dev-datasaker-io-ru2qna" + security_groups = [aws_security_group.api-elb-dev-datasaker-io.id] + subnets = ["subnet-05b9f4f02955c3307", "subnet-0b4f418020349fb84", "subnet-0d762a41fb41d63e5"] + tags = { + 
"KubernetesCluster" = "dev.datasaker.io" + "Name" = "api.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_instance_profile" "masters-dev-datasaker-io" { + name = "masters.dev.datasaker.io" + role = aws_iam_role.masters-dev-datasaker-io.name + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-dev-datasaker-io" { + name = "nodes.dev.datasaker.io" + role = aws_iam_role.nodes-dev-datasaker-io.name + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role" "masters-dev-datasaker-io" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.dev.datasaker.io_policy") + name = "masters.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role" "nodes-dev-datasaker-io" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.dev.datasaker.io_policy") + name = "nodes.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-dev-datasaker-io" { + name = "masters.dev.datasaker.io" + policy = file("${path.module}/data/aws_iam_role_policy_masters.dev.datasaker.io_policy") + role = aws_iam_role.masters-dev-datasaker-io.name +} + +resource "aws_iam_role_policy" "nodes-dev-datasaker-io" { + name = "nodes.dev.datasaker.io" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy") + role = aws_iam_role.nodes-dev-datasaker-io.name +} + +resource "aws_key_pair" "kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6" { + key_name = "kubernetes.dev.datasaker.io-c8:01:5e:c8:c1:4f:2a:1b:71:6c:21:3a:5c:04:7b:d6" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key") + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_launch_template" "dev-data-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "m5.4xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "m5.4xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-b" + 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "m5.4xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" 
= "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-mgmt-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-mgmt-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-mgmt-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = 
aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-mgmt-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-process-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-process-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-a.dev.datasaker.io" + 
"k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-process-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-process-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-b.dev.datasaker.io" + 
"k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-process-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 100 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-process-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2a-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "t3.small" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2a.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" 
= "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2b-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "t3.small" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2b.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2c-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "t3.small" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2c.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + 
"kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data") +} + +resource "aws_route53_record" "api-dev-datasaker-io" { + alias { + evaluate_target_health = false + name = aws_elb.api-dev-datasaker-io.dns_name + zone_id = aws_elb.api-dev-datasaker-io.zone_id + } + name = "api.dev.datasaker.io" + type = "A" + zone_id = "/hostedzone/Z072735718G25WNVKU834" +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "dev.datasaker.io/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-aws-ebs-csi-driver-addons-k8s-io-k8s-1-17" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content") + key = "dev.datasaker.io/addons/aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-bootstrap" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content") + key = "dev.datasaker.io/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "dev.datasaker.io/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "dev.datasaker.io/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "dev.datasaker.io/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" 
"dev-datasaker-io-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "dev.datasaker.io/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-leader-migration-rbac-addons-k8s-io-k8s-1-23" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content") + key = "dev.datasaker.io/addons/leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-limit-range-addons-k8s-io" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content") + key = "dev.datasaker.io/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-networking-projectcalico-org-k8s-1-22" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content") + key = "dev.datasaker.io/addons/networking.projectcalico.org/k8s-1.22.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "dev.datasaker.io/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "dev.datasaker.io/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "dev.datasaker.io/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "dev.datasaker.io/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events_content") + key = "dev.datasaker.io/manifests/etcd/events.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main_content") + key = "dev.datasaker.io/manifests/etcd/main.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = 
"clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "dev.datasaker.io/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-a_content") + key = "dev.datasaker.io/igconfig/node/dev-data-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-b_content") + key = "dev.datasaker.io/igconfig/node/dev-data-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-c_content") + key = "dev.datasaker.io/igconfig/node/dev-data-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-mgmt-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content") + key = "dev.datasaker.io/igconfig/node/dev-mgmt-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-mgmt-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content") + key = "dev.datasaker.io/igconfig/node/dev-mgmt-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-a_content") + key = "dev.datasaker.io/igconfig/node/dev-process-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-b_content") + key = "dev.datasaker.io/igconfig/node/dev-process-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-c_content") + key = "dev.datasaker.io/igconfig/node/dev-process-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-ap-northeast-2a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-ap-northeast-2b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" 
"nodeupconfig-master-ap-northeast-2c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "api-elb-dev-datasaker-io" { + description = "Security group for api ELB" + name = "api-elb.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "api-elb.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-03cbb88e181ccb46e" +} + +resource "aws_security_group" "masters-dev-datasaker-io" { + description = "Security group for masters" + name = "masters.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-03cbb88e181ccb46e" +} + +resource "aws_security_group" "nodes-dev-datasaker-io" { + description = "Security group for nodes" + name = "nodes.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-03cbb88e181ccb46e" +} + +resource "aws_security_group_rule" "from-115-178-73-2--32-ingress-tcp-22to22-masters-dev-datasaker-io" { + cidr_blocks = ["115.178.73.2/32"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-115-178-73-2--32-ingress-tcp-22to22-nodes-dev-datasaker-io" { + cidr_blocks = ["115.178.73.2/32"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-115-178-73-2--32-ingress-tcp-443to443-api-elb-dev-datasaker-io" { + cidr_blocks = ["115.178.73.2/32"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-115-178-73-91--32-ingress-tcp-22to22-masters-dev-datasaker-io" { + cidr_blocks = ["115.178.73.91/32"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-115-178-73-91--32-ingress-tcp-22to22-nodes-dev-datasaker-io" { + cidr_blocks = ["115.178.73.91/32"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-115-178-73-91--32-ingress-tcp-443to443-api-elb-dev-datasaker-io" { + cidr_blocks = ["115.178.73.91/32"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-api-elb-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-api-elb-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 0 + type = "egress" 
+} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-ingress-all-0to0-masters-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-ingress-all-0to0-nodes-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + source_security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-4-0to0-masters-dev-datasaker-io" { + from_port = 0 + protocol = "4" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-all-0to0-nodes-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-1to2379-masters-dev-datasaker-io" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-2382to4000-masters-dev-datasaker-io" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-4003to65535-masters-dev-datasaker-io" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-udp-1to65535-masters-dev-datasaker-io" { + from_port = 1 + protocol = "udp" + security_group_id = 
aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "https-elb-to-master" { + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-api-elb-115-178-73-2--32" { + cidr_blocks = ["115.178.73.2/32"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-api-elb-115-178-73-91--32" { + cidr_blocks = ["115.178.73.91/32"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 4 + type = "ingress" +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +}
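The required_providers block above declares configuration_aliases = [aws.files], and every aws_s3_object resource in this file is pinned to provider = aws.files, so the configuration that applies this generated file needs an aliased AWS provider named "files" alongside the default one. The provider blocks themselves are not visible in this hunk; if kOps did not already emit them earlier in the file, the wiring looks roughly like the sketch below — a minimal, illustrative example (not part of the generated diff), assuming the ap-northeast-2 region implied by the master-ap-northeast-2a/b/c instance groups:

provider "aws" {
  # Default provider used by the EC2, IAM, ELB and security group resources above.
  # Region is an assumption based on the AZ names in this diff.
  region = "ap-northeast-2"
}

provider "aws" {
  # Aliased provider consumed via configuration_aliases = [aws.files]; the
  # aws_s3_object resources targeting the clusters.dev.datasaker.io bucket use it.
  alias  = "files"
  region = "ap-northeast-2"
}

With both providers declared, terraform init and terraform plan against this configuration should satisfy the hashicorp/aws >= 4.0.0 requirement for the default and files provider configurations alike.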