From 8c9fa0bcb394aa382324578591889dc2a09d46f6 Mon Sep 17 00:00:00 2001
From: exhsgahm
Date: Fri, 16 Sep 2022 12:25:27 +0900
Subject: [PATCH] update

---
 terraform/tf-datasaker/dev.tf | 9 +-
 terraform/tf-datasaker/dmz.tf | 15 +-
 terraform/tf-datasaker/iac.tf | 6 +-
 terraform/tf-datasaker/terraform.tfstate | 644 +--
 .../tf-datasaker/terraform.tfstate.backup | 2151 +-------
 ...s_iam_role_masters.dev.datasaker.io_policy | 10 +
 ...aws_iam_role_nodes.dev.datasaker.io_policy | 10 +
 ...ole_policy_masters.dev.datasaker.io_policy | 273 +
 ..._role_policy_nodes.dev.datasaker.io_policy | 50 +
 ...8015ec8c14f2a1b716c213a5c047bd6_public_key | 1 +
 ...late_dev-data-a.dev.datasaker.io_user_data | 175 +
 ...late_dev-data-b.dev.datasaker.io_user_data | 175 +
 ...late_dev-data-c.dev.datasaker.io_user_data | 175 +
 ...late_dev-mgmt-a.dev.datasaker.io_user_data | 175 +
 ...late_dev-mgmt-b.dev.datasaker.io_user_data | 175 +
 ...e_dev-process-a.dev.datasaker.io_user_data | 175 +
 ...e_dev-process-b.dev.datasaker.io_user_data | 175 +
 ...e_dev-process-c.dev.datasaker.io_user_data | 175 +
 ...east-2a.masters.dev.datasaker.io_user_data | 275 +
 ...east-2b.masters.dev.datasaker.io_user_data | 275 +
 ...east-2c.masters.dev.datasaker.io_user_data | 275 +
 ...s_s3_object_cluster-completed.spec_content | 251 +
 ...-csi-driver.addons.k8s.io-k8s-1.17_content | 792 +++
 ..._dev.datasaker.io-addons-bootstrap_content | 69 +
 ...ons-coredns.addons.k8s.io-k8s-1.12_content | 385 ++
 ...-controller.addons.k8s.io-k8s-1.12_content | 140 +
 ...-controller.addons.k8s.io-k8s-1.16_content | 225 +
 ...let-api.rbac.addons.k8s.io-k8s-1.9_content | 17 +
 ...ration.rbac.addons.k8s.io-k8s-1.23_content | 52 +
 ...o-addons-limit-range.addons.k8s.io_content | 15 +
 ...working.projectcalico.org-k8s-1.22_content | 4778 +++++++++++++++++
 ...-storage-aws.addons.k8s.io-v1.15.0_content | 118 +
 ...s3_object_etcd-cluster-spec-events_content | 4 +
 ...s_s3_object_etcd-cluster-spec-main_content | 4 +
 .../aws_s3_object_kops-version.txt_content | 1 +
 ...bject_manifests-etcdmanager-events_content | 61 +
 ..._object_manifests-etcdmanager-main_content | 61 +
 ...-static-kube-apiserver-healthcheck_content | 33 +
 ..._s3_object_nodeupconfig-dev-data-a_content | 70 +
 ..._s3_object_nodeupconfig-dev-data-b_content | 70 +
 ..._s3_object_nodeupconfig-dev-data-c_content | 70 +
 ..._s3_object_nodeupconfig-dev-mgmt-a_content | 70 +
 ..._s3_object_nodeupconfig-dev-mgmt-b_content | 70 +
 ..._object_nodeupconfig-dev-process-a_content | 70 +
 ..._object_nodeupconfig-dev-process-b_content | 70 +
 ..._object_nodeupconfig-dev-process-c_content | 70 +
 ...odeupconfig-master-ap-northeast-2a_content | 265 +
 ...odeupconfig-master-ap-northeast-2b_content | 265 +
 ...odeupconfig-master-ap-northeast-2c_content | 265 +
 .../tf-kops-dev-20200916-ip/kubernetes.tf | 2358 ++++
 50 files changed, 13524 insertions(+), 2589 deletions(-)

 create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_masters.dev.datasaker.io_policy
 create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_nodes.dev.datasaker.io_policy
 create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_masters.dev.datasaker.io_policy
 create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy
 create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key
 create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data
 create mode 100644
terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_cluster-completed.spec_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-events_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-main_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_kops-version.txt_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-events_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-main_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-a_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-b_content create 
mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-c_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-a_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-b_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-c_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content create mode 100644 terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content create mode 100644 terraform/tf-kops-dev-20200916-ip/kubernetes.tf diff --git a/terraform/tf-datasaker/dev.tf b/terraform/tf-datasaker/dev.tf index 959adde..3c63f75 100644 --- a/terraform/tf-datasaker/dev.tf +++ b/terraform/tf-datasaker/dev.tf @@ -16,13 +16,14 @@ resource "aws_route" "route-private-rt-datasaker-dev-0-0-0-0--0" { resource "aws_subnet" "sbn-dev-a" { availability_zone = "ap-northeast-2a" - cidr_block = "172.21.1.0/24" + cidr_block = "172.21.8.0/23" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { "Name" = "sbn-dev-a.datasaker" "SubnetType" = "Private" "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/cluster/dev.datasaker.io" = "shared" "kubernetes.io/role/elb" = "1" "kubernetes.io/role/internal-elb" = "1" } @@ -31,13 +32,14 @@ resource "aws_subnet" "sbn-dev-a" { resource "aws_subnet" "sbn-dev-b" { availability_zone = "ap-northeast-2b" - cidr_block = "172.21.2.0/24" + cidr_block = "172.21.10.0/23" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { "Name" = "sbn-dev-b.datasaker" "SubnetType" = "Private" "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/cluster/dev.datasaker.io" = "shared" "kubernetes.io/role/elb" = "1" "kubernetes.io/role/internal-elb" = "1" } @@ -46,13 +48,14 @@ resource "aws_subnet" "sbn-dev-b" { resource "aws_subnet" "sbn-dev-c" { availability_zone = "ap-northeast-2c" - cidr_block = "172.21.3.0/24" + cidr_block = "172.21.12.0/23" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { "Name" = "sbn-dev-c.datasaker" "SubnetType" = "Private" "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/cluster/dev.datasaker.io" = "shared" "kubernetes.io/role/elb" = "1" "kubernetes.io/role/internal-elb" = "1" } diff --git a/terraform/tf-datasaker/dmz.tf b/terraform/tf-datasaker/dmz.tf index 20a02f2..a0cddf9 100644 --- a/terraform/tf-datasaker/dmz.tf +++ b/terraform/tf-datasaker/dmz.tf @@ -15,13 +15,14 @@ output "sbn_dmz_c_id" { resource "aws_subnet" "sbn-dmz-a" { availability_zone = "ap-northeast-2a" - cidr_block = "172.21.0.0/28" + cidr_block = "172.21.0.0/24" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { "Name" = "sbn-dmz-a.datasaker" - "SubnetType" = "Public" + "SubnetType" = "Utility" "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/cluster/dev.datasaker.io" = "shared" "kubernetes.io/role/elb" = "1" 
"kubernetes.io/role/internal-elb" = "1" } @@ -30,13 +31,14 @@ resource "aws_subnet" "sbn-dmz-a" { resource "aws_subnet" "sbn-dmz-b" { availability_zone = "ap-northeast-2b" - cidr_block = "172.21.0.16/28" + cidr_block = "172.21.1.0/24" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { "Name" = "sbn-dmz-b.datasaker" - "SubnetType" = "Public" + "SubnetType" = "Utility" "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/cluster/dev.datasaker.io" = "shared" "kubernetes.io/role/elb" = "1" "kubernetes.io/role/internal-elb" = "1" } @@ -45,13 +47,14 @@ resource "aws_subnet" "sbn-dmz-b" { resource "aws_subnet" "sbn-dmz-c" { availability_zone = "ap-northeast-2c" - cidr_block = "172.21.0.32/28" + cidr_block = "172.21.2.0/24" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { "Name" = "sbn-dmz-c.datasaker" - "SubnetType" = "Public" + "SubnetType" = "Utility" "kubernetes.io/cluster/datasaker" = "owned" + "kubernetes.io/cluster/dev.datasaker.io" = "shared" "kubernetes.io/role/elb" = "1" "kubernetes.io/role/internal-elb" = "1" } diff --git a/terraform/tf-datasaker/iac.tf b/terraform/tf-datasaker/iac.tf index a9148f3..59d601d 100644 --- a/terraform/tf-datasaker/iac.tf +++ b/terraform/tf-datasaker/iac.tf @@ -15,7 +15,7 @@ resource "aws_route" "route-private-rt-datasaker-iac-0-0-0-0--0" { resource "aws_subnet" "sbn-iac-a" { availability_zone = "ap-northeast-2a" - cidr_block = "172.21.4.0/24" + cidr_block = "172.21.16.0/23" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { @@ -30,7 +30,7 @@ resource "aws_subnet" "sbn-iac-a" { resource "aws_subnet" "sbn-iac-b" { availability_zone = "ap-northeast-2b" - cidr_block = "172.21.5.0/24" + cidr_block = "172.21.18.0/23" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { @@ -45,7 +45,7 @@ resource "aws_subnet" "sbn-iac-b" { resource "aws_subnet" "sbn-iac-c" { availability_zone = "ap-northeast-2c" - cidr_block = "172.21.6.0/24" + cidr_block = "172.21.20.0/23" enable_resource_name_dns_a_record_on_launch = true private_dns_hostname_type_on_launch = "resource-name" tags = { diff --git a/terraform/tf-datasaker/terraform.tfstate b/terraform/tf-datasaker/terraform.tfstate index 68e452a..130de28 100644 --- a/terraform/tf-datasaker/terraform.tfstate +++ b/terraform/tf-datasaker/terraform.tfstate @@ -1,19 +1,19 @@ { "version": 4, "terraform_version": "1.1.9", - "serial": 915, + "serial": 1107, "lineage": "0d7102e1-4b04-a7c0-069c-c81a4ba42c0d", "outputs": { "sbn_dmz_a_id": { - "value": "subnet-0d762a41fb41d63e5", + "value": "subnet-0de55619bee2411f8", "type": "string" }, "sbn_dmz_b_id": { - "value": "subnet-0b4f418020349fb84", + "value": "subnet-0a5d787353f874684", "type": "string" }, "sbn_dmz_c_id": { - "value": "subnet-05b9f4f02955c3307", + "value": "subnet-0ee26ffc561efb292", "type": "string" }, "vpc_datasaker_cidr_block": { @@ -21,7 +21,7 @@ "type": "string" }, "vpc_datasaker_id": { - "value": "vpc-03cbb88e181ccb46e", + "value": "vpc-0b6e0b906c678a22f", "type": "string" } }, @@ -35,7 +35,7 @@ { "schema_version": 0, "attributes": { - "arn": "arn:aws:autoscaling:ap-northeast-2:508259851457:autoScalingGroup:b9014582-ad5c-4141-a8a8-60254fd3a594:autoScalingGroupName/ag-dmz-bastion-datasaker", + "arn": 
"arn:aws:autoscaling:ap-northeast-2:508259851457:autoScalingGroup:5bf925ed-4e89-4f61-a2fa-8da5c293a916:autoScalingGroupName/ag-dmz-bastion-datasaker", "availability_zones": [ "ap-northeast-2a", "ap-northeast-2b" @@ -65,12 +65,12 @@ "launch_configuration": "", "launch_template": [ { - "id": "lt-0e09e47760cf44939", + "id": "lt-0a499abed36cc72a3", "name": "lt-dmz-bastion-datasaker", - "version": "5" + "version": "1" } ], - "load_balancers": [], + "load_balancers": null, "max_instance_lifetime": 0, "max_size": 1, "metrics_granularity": "1Minute", @@ -82,7 +82,7 @@ "placement_group": "", "protect_from_scale_in": false, "service_linked_role_arn": "arn:aws:iam::508259851457:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", - "suspended_processes": [], + "suspended_processes": null, "tag": [ { "key": "Name", @@ -91,12 +91,12 @@ } ], "tags": null, - "target_group_arns": [], - "termination_policies": [], + "target_group_arns": null, + "termination_policies": null, "timeouts": null, "vpc_zone_identifier": [ - "subnet-0b4f418020349fb84", - "subnet-0d762a41fb41d63e5" + "subnet-0a5d787353f874684", + "subnet-0de55619bee2411f8" ], "wait_for_capacity_timeout": "10m", "wait_for_elb_capacity": null, @@ -125,21 +125,21 @@ "schema_version": 0, "attributes": { "address": null, - "allocation_id": "eipalloc-0136737cbb023c71d", + "allocation_id": "eipalloc-08b46670f70c2d11d", "associate_with_private_ip": null, - "association_id": "eipassoc-0ff5d589fed492a50", + "association_id": "", "carrier_ip": "", "customer_owned_ip": "", "customer_owned_ipv4_pool": "", "domain": "vpc", - "id": "eipalloc-0136737cbb023c71d", + "id": "eipalloc-08b46670f70c2d11d", "instance": "", "network_border_group": "ap-northeast-2", - "network_interface": "eni-052f88aeb468c6ae6", - "private_dns": "ip-172-21-0-12.ap-northeast-2.compute.internal", - "private_ip": "172.21.0.12", - "public_dns": "ec2-43-200-251-68.ap-northeast-2.compute.amazonaws.com", - "public_ip": "43.200.251.68", + "network_interface": "", + "private_dns": null, + "private_ip": "", + "public_dns": "ec2-54-180-77-139.ap-northeast-2.compute.amazonaws.com", + "public_ip": "54.180.77.139", "public_ipv4_pool": "amazon", "tags": { "Name": "eip-natgw-datasaker" @@ -164,8 +164,8 @@ { "schema_version": 0, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:internet-gateway/igw-022757e5d9d9b36da", - "id": "igw-022757e5d9d9b36da", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:internet-gateway/igw-024cfe034db889aee", + "id": "igw-024cfe034db889aee", "owner_id": "508259851457", "tags": { "Name": "igw-datasaker" @@ -174,7 +174,7 @@ "Name": "igw-datasaker" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19", @@ -198,7 +198,7 @@ "id": "kp-bastion-datasaker", "key_name": "kp-bastion-datasaker", "key_name_prefix": "", - "key_pair_id": "key-0ff8e8e0c0d4b4e17", + "key_pair_id": "key-0e4f1d4dfa2a4082a", "key_type": "rsa", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDv9Bk/20f0xHLQN1Mnub0VwsbRw7ggubeUZ+pUVaX9BD7uUud/ITktmTArbabLJLGgWx64la6+6VuQHauzX/cpMp4dVxoaySQDGPsB+V0WnXaq0pWop5BoJaPO75lpk/Kp7NFtn9x3315Rqmis1Df1UrQehMkqunnr2jWkil6iueAckztpsnqxlb8S+uVYiM7C4HsVx8XdOT3WtfUv+hzDlejy11nzi5T4HMT70O107N4g5CrEapluc7M3NfxCFhz5Gxu8P0dfJKLs9fFT4E8DRfGly5/cDcKbiJHSAZYRN6UwKr3z7LAw8aIW8JWflXn1fMZ92qdiT04kN8ZdVzyMpUiWMXJQPrfI2EHT/OHAympzKrXnT98oIqJANE4Eq72OG9Hrb6Tauk8Bde5/v3P9d7m5Zi9tx+01PZ1JQR+1dkJeV3Am6mjKWrxIowKPol2chnARoU7y1rEZGGi+09bD5hUq7KW6z61DUIlCMYF0Oq0IMs/voQP8zqpDmvSPNJc= hsgahm@ws-ubuntu", "tags": { @@ -223,7 +223,7 @@ { "schema_version": 0, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:launch-template/lt-0e09e47760cf44939", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:launch-template/lt-0a499abed36cc72a3", "block_device_mappings": [ { "device_name": "/dev/xvda", @@ -256,7 +256,7 @@ "enclave_options": [], "hibernation_options": [], "iam_instance_profile": [], - "id": "lt-0e09e47760cf44939", + "id": "lt-0a499abed36cc72a3", "image_id": "ami-0ea5eb4b05645aa8a", "instance_initiated_shutdown_behavior": "", "instance_market_options": [], @@ -264,7 +264,7 @@ "instance_type": "t3.small", "kernel_id": "", "key_name": "kp-bastion-datasaker", - "latest_version": 5, + "latest_version": 1, "license_specification": [], "maintenance_options": [], "metadata_options": [ @@ -292,18 +292,18 @@ "device_index": 0, "interface_type": "", "ipv4_address_count": 0, - "ipv4_addresses": [], + "ipv4_addresses": null, "ipv4_prefix_count": 0, - "ipv4_prefixes": [], + "ipv4_prefixes": null, "ipv6_address_count": 0, - "ipv6_addresses": [], + "ipv6_addresses": null, "ipv6_prefix_count": 0, - "ipv6_prefixes": [], + "ipv6_prefixes": null, "network_card_index": 0, "network_interface_id": "", "private_ip_address": "", "security_groups": [ - "sg-0943a344944f5ed76" + "sg-07f27eba164d59dfa" ], "subnet_id": "" } @@ -311,7 +311,7 @@ "placement": [], "private_dns_name_options": [], "ram_disk_id": "", - "security_group_names": [], + "security_group_names": null, "tag_specifications": [], "tags": { "Name": "lt-dmz-bastion-datasaker" @@ -321,13 +321,14 @@ }, "update_default_version": null, "user_data": "", - "vpc_security_group_ids": [] + "vpc_security_group_ids": null }, "sensitive_attributes": [], "private": "bnVsbA==", "dependencies": [ "aws_key_pair.kp-bastion-datasaker", - "aws_security_group.sg-dmz-datasaker" + "aws_security_group.sg-dmz-datasaker", + "aws_vpc.vpc-datasaker" ], "create_before_destroy": true } @@ -342,13 +343,13 @@ { "schema_version": 0, "attributes": { - "allocation_id": "eipalloc-0136737cbb023c71d", + "allocation_id": "eipalloc-08b46670f70c2d11d", "connectivity_type": "public", - "id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "eni-052f88aeb468c6ae6", - "private_ip": "172.21.0.12", - "public_ip": "43.200.251.68", - "subnet_id": "subnet-0d762a41fb41d63e5", + "id": "nat-0149e41fa11377dfd", + "network_interface_id": "eni-013cb7e830637be38", + "private_ip": "172.21.0.244", + "public_ip": "54.180.77.139", + "subnet_id": "subnet-0de55619bee2411f8", "tags": { "Name": "natgw-datasaker" }, @@ -382,15 +383,15 @@ "destination_ipv6_cidr_block": "", "destination_prefix_list_id": "", "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "id": "r-rtb-062998ea429f0b4a31080289494", + "gateway_id": "igw-024cfe034db889aee", + "id": "r-rtb-057ad7940bd4d0e471080289494", "instance_id": "", "instance_owner_id": "", "local_gateway_id": "", "nat_gateway_id": "", "network_interface_id": "", "origin": "CreateRoute", - 
"route_table_id": "rtb-062998ea429f0b4a3", + "route_table_id": "rtb-057ad7940bd4d0e47", "state": "active", "timeouts": null, "transit_gateway_id": "", @@ -422,15 +423,15 @@ "destination_ipv6_cidr_block": "::/0", "destination_prefix_list_id": "", "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "id": "r-rtb-062998ea429f0b4a32750132062", + "gateway_id": "igw-024cfe034db889aee", + "id": "r-rtb-057ad7940bd4d0e472750132062", "instance_id": "", "instance_owner_id": "", "local_gateway_id": "", "nat_gateway_id": "", "network_interface_id": "", "origin": "CreateRoute", - "route_table_id": "rtb-062998ea429f0b4a3", + "route_table_id": "rtb-057ad7940bd4d0e47", "state": "active", "timeouts": null, "transit_gateway_id": "", @@ -463,14 +464,14 @@ "destination_prefix_list_id": "", "egress_only_gateway_id": "", "gateway_id": "", - "id": "r-rtb-0beefbb491d9651011080289494", + "id": "r-rtb-0222c34fe748358201080289494", "instance_id": "", "instance_owner_id": "", "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", + "nat_gateway_id": "nat-0149e41fa11377dfd", "network_interface_id": "", "origin": "CreateRoute", - "route_table_id": "rtb-0beefbb491d965101", + "route_table_id": "rtb-0222c34fe74835820", "state": "active", "timeouts": null, "transit_gateway_id": "", @@ -506,14 +507,14 @@ "destination_prefix_list_id": "", "egress_only_gateway_id": "", "gateway_id": "", - "id": "r-rtb-0afbb9291fd78c4601080289494", + "id": "r-rtb-02fa87e2873f596311080289494", "instance_id": "", "instance_owner_id": "", "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", + "nat_gateway_id": "nat-0149e41fa11377dfd", "network_interface_id": "", "origin": "CreateRoute", - "route_table_id": "rtb-0afbb9291fd78c460", + "route_table_id": "rtb-02fa87e2873f59631", "state": "active", "timeouts": null, "transit_gateway_id": "", @@ -542,28 +543,11 @@ { "schema_version": 0, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-0beefbb491d965101", - "id": "rtb-0beefbb491d965101", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-0222c34fe74835820", + "id": "rtb-0222c34fe74835820", "owner_id": "508259851457", "propagating_vgws": [], - "route": [ - { - "carrier_gateway_id": "", - "cidr_block": "0.0.0.0/0", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "", - "instance_id": "", - "ipv6_cidr_block": "", - "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "", - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - } - ], + "route": [], "tags": { "Name": "rt-datasaker-dev" }, @@ -571,7 +555,7 @@ "Name": "rt-datasaker-dev" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", @@ -590,28 +574,11 @@ { "schema_version": 0, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-0afbb9291fd78c460", - "id": "rtb-0afbb9291fd78c460", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-02fa87e2873f59631", + "id": "rtb-02fa87e2873f59631", "owner_id": "508259851457", "propagating_vgws": [], - "route": [ - { - "carrier_gateway_id": "", - "cidr_block": "0.0.0.0/0", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - 
"gateway_id": "", - "instance_id": "", - "ipv6_cidr_block": "", - "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "", - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - } - ], + "route": [], "tags": { "Name": "rt-datasaker-iac" }, @@ -619,7 +586,7 @@ "Name": "rt-datasaker-iac" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", @@ -638,44 +605,11 @@ { "schema_version": 0, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-062998ea429f0b4a3", - "id": "rtb-062998ea429f0b4a3", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-057ad7940bd4d0e47", + "id": "rtb-057ad7940bd4d0e47", "owner_id": "508259851457", "propagating_vgws": [], - "route": [ - { - "carrier_gateway_id": "", - "cidr_block": "", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "instance_id": "", - "ipv6_cidr_block": "::/0", - "local_gateway_id": "", - "nat_gateway_id": "", - "network_interface_id": "", - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - }, - { - "carrier_gateway_id": "", - "cidr_block": "0.0.0.0/0", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "instance_id": "", - "ipv6_cidr_block": "", - "local_gateway_id": "", - "nat_gateway_id": "", - "network_interface_id": "", - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - } - ], + "route": [], "tags": { "Name": "rt-datasaker-pub" }, @@ -683,7 +617,7 @@ "Name": "rt-datasaker-pub" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", @@ -703,9 +637,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0627f343c27b3fb15", - "route_table_id": "rtb-0beefbb491d965101", - "subnet_id": "subnet-021536c4f12971c74" + "id": "rtbassoc-0459be806f7412cc4", + "route_table_id": "rtb-0222c34fe74835820", + "subnet_id": "subnet-0c875e254456809f7" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -727,9 +661,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0c4078a79542dfedd", - "route_table_id": "rtb-0beefbb491d965101", - "subnet_id": "subnet-0c90842daa15aa7c7" + "id": "rtbassoc-099a4bf3c51f94b59", + "route_table_id": "rtb-0222c34fe74835820", + "subnet_id": "subnet-05672a669943fc12f" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -751,9 +685,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0d2d78c9a5732501f", - "route_table_id": "rtb-0beefbb491d965101", - "subnet_id": "subnet-0ae3ab7ae241fe761" + "id": "rtbassoc-02bd4922ff04a92c9", + "route_table_id": "rtb-0222c34fe74835820", + "subnet_id": "subnet-0940fd78504acbbde" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -775,9 +709,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0cccbbf9004f3431b", - "route_table_id": "rtb-062998ea429f0b4a3", - "subnet_id": 
"subnet-0d762a41fb41d63e5" + "id": "rtbassoc-02b2175a50034917e", + "route_table_id": "rtb-057ad7940bd4d0e47", + "subnet_id": "subnet-0de55619bee2411f8" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -799,9 +733,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0c00a25a4324cf2f9", - "route_table_id": "rtb-062998ea429f0b4a3", - "subnet_id": "subnet-0b4f418020349fb84" + "id": "rtbassoc-094e0157c4a065f34", + "route_table_id": "rtb-057ad7940bd4d0e47", + "subnet_id": "subnet-0a5d787353f874684" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -823,9 +757,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0e6a01d3bbf0f41e3", - "route_table_id": "rtb-062998ea429f0b4a3", - "subnet_id": "subnet-05b9f4f02955c3307" + "id": "rtbassoc-0989ba31270eaee71", + "route_table_id": "rtb-057ad7940bd4d0e47", + "subnet_id": "subnet-0ee26ffc561efb292" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -847,9 +781,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0fa9b2133e43a0469", - "route_table_id": "rtb-0afbb9291fd78c460", - "subnet_id": "subnet-098225ee426615f0a" + "id": "rtbassoc-0d9f0c70e1d159ede", + "route_table_id": "rtb-02fa87e2873f59631", + "subnet_id": "subnet-08330ae1fd7c5d77e" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -871,9 +805,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-00541f9ae239c0a01", - "route_table_id": "rtb-0afbb9291fd78c460", - "subnet_id": "subnet-04321c9a5150c8317" + "id": "rtbassoc-04f3b256e59854ac7", + "route_table_id": "rtb-02fa87e2873f59631", + "subnet_id": "subnet-0c8c4d1df1a2920e4" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -895,9 +829,9 @@ "schema_version": 0, "attributes": { "gateway_id": "", - "id": "rtbassoc-0d495c6189944030b", - "route_table_id": "rtb-0afbb9291fd78c460", - "subnet_id": "subnet-0881ae89f2b5c3cbd" + "id": "rtbassoc-0fd9ea480fc1bd2ec", + "route_table_id": "rtb-02fa87e2873f59631", + "subnet_id": "subnet-06e724baf7d879769" }, "sensitive_attributes": [], "private": "bnVsbA==", @@ -918,65 +852,11 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-00be91bb5d8d3662e", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-0aed067b37f609a6f", "description": "Security group dev-datasaker", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 65535 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "id": "sg-00be91bb5d8d3662e", - "ingress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 22 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], + "egress": [], + "id": "sg-0aed067b37f609a6f", + "ingress": [], "name": "secg-dev-datasaker", "name_prefix": "", "owner_id": "508259851457", @@ -988,7 +868,7 @@ "Name": "sg-dev-datasaker" }, "timeouts": null, - "vpc_id": 
"vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", @@ -1007,65 +887,11 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-0943a344944f5ed76", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-07f27eba164d59dfa", "description": "Security group dmz-datasaker", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 65535 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "id": "sg-0943a344944f5ed76", - "ingress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 22 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], + "egress": [], + "id": "sg-07f27eba164d59dfa", + "ingress": [], "name": "secg-dmz-datasaker", "name_prefix": "", "owner_id": "508259851457", @@ -1077,7 +903,7 @@ "Name": "sg-dmz-datasaker" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", @@ -1097,65 +923,11 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-0bd4433b800af1c07", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-07d769a675ed05b35", "description": "Security group iac-datasaker", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 65535 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "id": "sg-0bd4433b800af1c07", - "ingress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 22 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 0 - } - ], + "egress": [], + "id": "sg-07d769a675ed05b35", + "ingress": [], "name": "secg-iac-datasaker", "name_prefix": "", "owner_id": "508259851457", @@ -1167,7 +939,7 @@ "Name": "sg-iac-datasaker" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", @@ -1189,13 +961,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 0, - "id": "sgrule-2827480756", + "id": "sgrule-3441164403", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "tcp", - "security_group_id": "sg-00be91bb5d8d3662e", + "security_group_id": "sg-0aed067b37f609a6f", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1223,13 +995,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 0, - "id": "sgrule-1335572876", + "id": "sgrule-3688340000", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "tcp", - "security_group_id": "sg-0943a344944f5ed76", + "security_group_id": "sg-07f27eba164d59dfa", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1257,13 +1029,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 0, - "id": "sgrule-2287612910", + "id": "sgrule-3927496604", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "tcp", - "security_group_id": "sg-0bd4433b800af1c07", + "security_group_id": "sg-07d769a675ed05b35", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1291,13 +1063,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 8, - "id": "sgrule-2425217960", + "id": "sgrule-4111863151", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "icmp", - "security_group_id": "sg-00be91bb5d8d3662e", + "security_group_id": "sg-0aed067b37f609a6f", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1325,13 +1097,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 8, - "id": "sgrule-2269989047", + "id": "sgrule-3238310385", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "icmp", - "security_group_id": "sg-0bd4433b800af1c07", + "security_group_id": "sg-07d769a675ed05b35", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1359,13 +1131,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 22, - "id": "sgrule-4048334944", + "id": "sgrule-4096359581", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "tcp", - "security_group_id": "sg-00be91bb5d8d3662e", + "security_group_id": "sg-0aed067b37f609a6f", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1393,13 +1165,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 22, - "id": "sgrule-2060637527", + "id": "sgrule-3300624291", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "tcp", - "security_group_id": "sg-0bd4433b800af1c07", + "security_group_id": "sg-07d769a675ed05b35", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1429,11 +1201,11 @@ ], "description": null, "from_port": 22, - "id": "sgrule-1796821016", + "id": "sgrule-3686484473", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "tcp", - "security_group_id": "sg-0943a344944f5ed76", + "security_group_id": "sg-07f27eba164d59dfa", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1443,7 +1215,8 @@ "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", "dependencies": [ - 
"aws_security_group.sg-dmz-datasaker" + "aws_security_group.sg-dmz-datasaker", + "aws_vpc.vpc-datasaker" ] } ] @@ -1462,11 +1235,11 @@ ], "description": null, "from_port": 22, - "id": "sgrule-3885655888", + "id": "sgrule-807868327", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "tcp", - "security_group_id": "sg-0943a344944f5ed76", + "security_group_id": "sg-07f27eba164d59dfa", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1476,7 +1249,8 @@ "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", "dependencies": [ - "aws_security_group.sg-dmz-datasaker" + "aws_security_group.sg-dmz-datasaker", + "aws_vpc.vpc-datasaker" ] } ] @@ -1493,13 +1267,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 8, - "id": "sgrule-942255193", + "id": "sgrule-320777473", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "icmp", - "security_group_id": "sg-00be91bb5d8d3662e", + "security_group_id": "sg-0aed067b37f609a6f", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1527,13 +1301,13 @@ "cidr_blocks": [ "0.0.0.0/0" ], - "description": "", + "description": null, "from_port": 8, - "id": "sgrule-2322679084", + "id": "sgrule-361293936", "ipv6_cidr_blocks": null, "prefix_list_ids": null, "protocol": "icmp", - "security_group_id": "sg-0bd4433b800af1c07", + "security_group_id": "sg-07d769a675ed05b35", "self": false, "source_security_group_id": null, "timeouts": null, @@ -1558,16 +1332,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-021536c4f12971c74", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0c875e254456809f7", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2a", "availability_zone_id": "apne2-az1", - "cidr_block": "172.21.1.0/24", + "cidr_block": "172.21.8.0/23", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-021536c4f12971c74", + "id": "subnet-0c875e254456809f7", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1580,6 +1354,7 @@ "Name": "sbn-dev-a.datasaker", "SubnetType": "Private", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, @@ -1587,11 +1362,12 @@ "Name": "sbn-dev-a.datasaker", "SubnetType": "Private", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1610,16 +1386,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0c90842daa15aa7c7", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-05672a669943fc12f", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2b", "availability_zone_id": "apne2-az2", - "cidr_block": "172.21.2.0/24", + "cidr_block": "172.21.10.0/23", "customer_owned_ipv4_pool": "", 
"enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0c90842daa15aa7c7", + "id": "subnet-05672a669943fc12f", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1632,6 +1408,7 @@ "Name": "sbn-dev-b.datasaker", "SubnetType": "Private", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, @@ -1639,11 +1416,12 @@ "Name": "sbn-dev-b.datasaker", "SubnetType": "Private", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1662,16 +1440,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0ae3ab7ae241fe761", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0940fd78504acbbde", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2c", "availability_zone_id": "apne2-az3", - "cidr_block": "172.21.3.0/24", + "cidr_block": "172.21.12.0/23", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0ae3ab7ae241fe761", + "id": "subnet-0940fd78504acbbde", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1684,6 +1462,7 @@ "Name": "sbn-dev-c.datasaker", "SubnetType": "Private", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, @@ -1691,11 +1470,12 @@ "Name": "sbn-dev-c.datasaker", "SubnetType": "Private", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1714,16 +1494,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0d762a41fb41d63e5", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0de55619bee2411f8", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2a", "availability_zone_id": "apne2-az1", - "cidr_block": "172.21.0.0/28", + "cidr_block": "172.21.0.0/24", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0d762a41fb41d63e5", + "id": "subnet-0de55619bee2411f8", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1734,20 +1514,22 @@ "private_dns_hostname_type_on_launch": "resource-name", "tags": { "Name": "sbn-dmz-a.datasaker", - "SubnetType": "Public", + "SubnetType": "Utility", "kubernetes.io/cluster/datasaker": "owned", + 
"kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "tags_all": { "Name": "sbn-dmz-a.datasaker", - "SubnetType": "Public", + "SubnetType": "Utility", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1766,16 +1548,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0b4f418020349fb84", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0a5d787353f874684", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2b", "availability_zone_id": "apne2-az2", - "cidr_block": "172.21.0.16/28", + "cidr_block": "172.21.1.0/24", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0b4f418020349fb84", + "id": "subnet-0a5d787353f874684", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1786,20 +1568,22 @@ "private_dns_hostname_type_on_launch": "resource-name", "tags": { "Name": "sbn-dmz-b.datasaker", - "SubnetType": "Public", + "SubnetType": "Utility", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "tags_all": { "Name": "sbn-dmz-b.datasaker", - "SubnetType": "Public", + "SubnetType": "Utility", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1818,16 +1602,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-05b9f4f02955c3307", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0ee26ffc561efb292", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2c", "availability_zone_id": "apne2-az3", - "cidr_block": "172.21.0.32/28", + "cidr_block": "172.21.2.0/24", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-05b9f4f02955c3307", + "id": "subnet-0ee26ffc561efb292", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1838,20 +1622,22 @@ "private_dns_hostname_type_on_launch": "resource-name", "tags": { "Name": "sbn-dmz-c.datasaker", - "SubnetType": "Public", + "SubnetType": "Utility", "kubernetes.io/cluster/datasaker": "owned", + "kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "tags_all": { "Name": "sbn-dmz-c.datasaker", - "SubnetType": "Public", + "SubnetType": "Utility", "kubernetes.io/cluster/datasaker": "owned", + 
"kubernetes.io/cluster/dev.datasaker.io": "shared", "kubernetes.io/role/elb": "1", "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1870,16 +1656,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-098225ee426615f0a", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-08330ae1fd7c5d77e", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2a", "availability_zone_id": "apne2-az1", - "cidr_block": "172.21.4.0/24", + "cidr_block": "172.21.16.0/23", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-098225ee426615f0a", + "id": "subnet-08330ae1fd7c5d77e", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1903,7 +1689,7 @@ "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1922,16 +1708,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-04321c9a5150c8317", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0c8c4d1df1a2920e4", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2b", "availability_zone_id": "apne2-az2", - "cidr_block": "172.21.5.0/24", + "cidr_block": "172.21.18.0/23", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-04321c9a5150c8317", + "id": "subnet-0c8c4d1df1a2920e4", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -1955,7 +1741,7 @@ "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -1974,16 +1760,16 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0881ae89f2b5c3cbd", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-06e724baf7d879769", "assign_ipv6_address_on_creation": false, "availability_zone": "ap-northeast-2c", "availability_zone_id": "apne2-az3", - "cidr_block": "172.21.6.0/24", + "cidr_block": "172.21.20.0/23", "customer_owned_ipv4_pool": "", "enable_dns64": false, "enable_resource_name_dns_a_record_on_launch": true, "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0881ae89f2b5c3cbd", + "id": "subnet-06e724baf7d879769", "ipv6_cidr_block": "", "ipv6_cidr_block_association_id": "", "ipv6_native": false, @@ -2007,7 +1793,7 @@ "kubernetes.io/role/internal-elb": "1" }, "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", @@ -2026,27 +1812,27 @@ { "schema_version": 1, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:vpc/vpc-03cbb88e181ccb46e", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:vpc/vpc-0b6e0b906c678a22f", "assign_generated_ipv6_cidr_block": true, "cidr_block": "172.21.0.0/16", - "default_network_acl_id": "acl-065f8bec9dbad2106", - "default_route_table_id": "rtb-01a448dbcf55051bf", - "default_security_group_id": "sg-0d13c39fc35a8585c", - "dhcp_options_id": "dopt-03658cf6324535d26", + "default_network_acl_id": "acl-02d1c338537220483", + "default_route_table_id": "rtb-08b098c50e293cbc8", + "default_security_group_id": "sg-013825bbf7b2a894a", + "dhcp_options_id": "dopt-021d4c0e9e138e269", "enable_classiclink": false, "enable_classiclink_dns_support": false, "enable_dns_hostnames": true, "enable_dns_support": true, - "id": "vpc-03cbb88e181ccb46e", + "id": "vpc-0b6e0b906c678a22f", "instance_tenancy": "default", "ipv4_ipam_pool_id": null, "ipv4_netmask_length": null, - "ipv6_association_id": "vpc-cidr-assoc-0122be3d12ff2da6f", - "ipv6_cidr_block": "2406:da12:f6a:f500::/56", + "ipv6_association_id": "vpc-cidr-assoc-08b51a13149ce18a5", + "ipv6_cidr_block": "2406:da12:a18:8900::/56", "ipv6_cidr_block_network_border_group": "ap-northeast-2", "ipv6_ipam_pool_id": "", "ipv6_netmask_length": 0, - "main_route_table_id": "rtb-01a448dbcf55051bf", + "main_route_table_id": "rtb-08b098c50e293cbc8", "owner_id": "508259851457", "tags": { "Name": "vpc-datasaker" @@ -2070,15 +1856,15 @@ { "schema_version": 0, "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:dhcp-options/dopt-03658cf6324535d26", + "arn": "arn:aws:ec2:ap-northeast-2:508259851457:dhcp-options/dopt-086d99ae90c3cde26", "domain_name": "ap-northeast-2.compute.internal", "domain_name_servers": [ "AmazonProvidedDNS" ], - "id": "dopt-03658cf6324535d26", - "netbios_name_servers": [], + "id": "dopt-086d99ae90c3cde26", + "netbios_name_servers": null, "netbios_node_type": "", - "ntp_servers": [], + "ntp_servers": null, "owner_id": "508259851457", "tags": { "Name": "vpc-dhcp-datasaker" @@ -2101,9 +1887,9 @@ { "schema_version": 0, "attributes": { - "dhcp_options_id": "dopt-03658cf6324535d26", - "id": "dopt-03658cf6324535d26-vpc-03cbb88e181ccb46e", - "vpc_id": "vpc-03cbb88e181ccb46e" + "dhcp_options_id": "dopt-086d99ae90c3cde26", + "id": "dopt-086d99ae90c3cde26-vpc-0b6e0b906c678a22f", + "vpc_id": "vpc-0b6e0b906c678a22f" }, "sensitive_attributes": [], "private": "bnVsbA==", diff --git a/terraform/tf-datasaker/terraform.tfstate.backup b/terraform/tf-datasaker/terraform.tfstate.backup index b2fc924..63fa888 100644 --- a/terraform/tf-datasaker/terraform.tfstate.backup +++ b/terraform/tf-datasaker/terraform.tfstate.backup @@ -1,2153 +1,8 @@ { "version": 4, "terraform_version": "1.1.9", - "serial": 901, + "serial": 1058, "lineage": "0d7102e1-4b04-a7c0-069c-c81a4ba42c0d", - "outputs": { - "sbn_dmz_a_id": { - "value": "subnet-0d762a41fb41d63e5", - "type": "string" - }, - "sbn_dmz_b_id": { - "value": "subnet-0b4f418020349fb84", - "type": "string" - }, - "sbn_dmz_c_id": { - "value": "subnet-05b9f4f02955c3307", - "type": "string" - }, - "vpc_datasaker_cidr_block": { - "value": "172.21.0.0/16", - "type": "string" - }, - "vpc_datasaker_id": { - "value": "vpc-03cbb88e181ccb46e", - "type": "string" - } - }, - "resources": [ - { - "mode": "managed", - "type": "aws_autoscaling_group", - "name": 
"ag-dmz-bastion-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:autoscaling:ap-northeast-2:508259851457:autoScalingGroup:b9014582-ad5c-4141-a8a8-60254fd3a594:autoScalingGroupName/ag-dmz-bastion-datasaker", - "availability_zones": [ - "ap-northeast-2a", - "ap-northeast-2b" - ], - "capacity_rebalance": false, - "context": "", - "default_cooldown": 300, - "default_instance_warmup": 0, - "desired_capacity": 1, - "enabled_metrics": [ - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMaxSize", - "GroupMinSize", - "GroupPendingInstances", - "GroupStandbyInstances", - "GroupTerminatingInstances", - "GroupTotalInstances" - ], - "force_delete": false, - "force_delete_warm_pool": false, - "health_check_grace_period": 300, - "health_check_type": "EC2", - "id": "ag-dmz-bastion-datasaker", - "initial_lifecycle_hook": [], - "instance_refresh": [], - "launch_configuration": "", - "launch_template": [ - { - "id": "lt-0e09e47760cf44939", - "name": "lt-dmz-bastion-datasaker", - "version": "4" - } - ], - "load_balancers": [], - "max_instance_lifetime": 0, - "max_size": 1, - "metrics_granularity": "1Minute", - "min_elb_capacity": null, - "min_size": 1, - "mixed_instances_policy": [], - "name": "ag-dmz-bastion-datasaker", - "name_prefix": "", - "placement_group": "", - "protect_from_scale_in": false, - "service_linked_role_arn": "arn:aws:iam::508259851457:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", - "suspended_processes": [], - "tag": [ - { - "key": "Name", - "propagate_at_launch": true, - "value": "ag-dmz-bastion-datasaker" - } - ], - "tags": null, - "target_group_arns": [], - "termination_policies": [], - "timeouts": null, - "vpc_zone_identifier": [ - "subnet-0b4f418020349fb84", - "subnet-0d762a41fb41d63e5" - ], - "wait_for_capacity_timeout": "10m", - "wait_for_elb_capacity": null, - "warm_pool": [] - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjo2MDAwMDAwMDAwMDAsInVwZGF0ZSI6NjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "aws_key_pair.kp-bastion-datasaker", - "aws_launch_template.lt-dmz-bastion-datasaker", - "aws_security_group.sg-dmz-datasaker", - "aws_subnet.sbn-dmz-a", - "aws_subnet.sbn-dmz-b" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_eip", - "name": "eip-natgw-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "address": null, - "allocation_id": "eipalloc-0136737cbb023c71d", - "associate_with_private_ip": null, - "association_id": "eipassoc-0ff5d589fed492a50", - "carrier_ip": "", - "customer_owned_ip": "", - "customer_owned_ipv4_pool": "", - "domain": "vpc", - "id": "eipalloc-0136737cbb023c71d", - "instance": "", - "network_border_group": "ap-northeast-2", - "network_interface": "eni-052f88aeb468c6ae6", - "private_dns": "ip-172-21-0-12.ap-northeast-2.compute.internal", - "private_ip": "172.21.0.12", - "public_dns": "ec2-43-200-251-68.ap-northeast-2.compute.amazonaws.com", - "public_ip": "43.200.251.68", - "public_ipv4_pool": "amazon", - "tags": { - "Name": "eip-natgw-datasaker" - }, - "tags_all": { - "Name": "eip-natgw-datasaker" - }, - "timeouts": null, - "vpc": true - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjoxODAwMDAwMDAwMDAsInJlYWQiOjkwMDAwMDAwMDAwMCwidXBkYXRlIjozMDAwMDAwMDAwMDB9fQ==" - } - ] - }, - { - 
"mode": "managed", - "type": "aws_internet_gateway", - "name": "igw-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:internet-gateway/igw-022757e5d9d9b36da", - "id": "igw-022757e5d9d9b36da", - "owner_id": "508259851457", - "tags": { - "Name": "igw-datasaker" - }, - "tags_all": { - "Name": "igw-datasaker" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_key_pair", - "name": "kp-bastion-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:key-pair/kp-bastion-datasaker", - "fingerprint": "10:d1:18:c9:92:30:9f:fe:fd:c4:52:1b:20:2c:30:6d", - "id": "kp-bastion-datasaker", - "key_name": "kp-bastion-datasaker", - "key_name_prefix": "", - "key_pair_id": "key-0ff8e8e0c0d4b4e17", - "key_type": "rsa", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDv9Bk/20f0xHLQN1Mnub0VwsbRw7ggubeUZ+pUVaX9BD7uUud/ITktmTArbabLJLGgWx64la6+6VuQHauzX/cpMp4dVxoaySQDGPsB+V0WnXaq0pWop5BoJaPO75lpk/Kp7NFtn9x3315Rqmis1Df1UrQehMkqunnr2jWkil6iueAckztpsnqxlb8S+uVYiM7C4HsVx8XdOT3WtfUv+hzDlejy11nzi5T4HMT70O107N4g5CrEapluc7M3NfxCFhz5Gxu8P0dfJKLs9fFT4E8DRfGly5/cDcKbiJHSAZYRN6UwKr3z7LAw8aIW8JWflXn1fMZ92qdiT04kN8ZdVzyMpUiWMXJQPrfI2EHT/OHAympzKrXnT98oIqJANE4Eq72OG9Hrb6Tauk8Bde5/v3P9d7m5Zi9tx+01PZ1JQR+1dkJeV3Am6mjKWrxIowKPol2chnARoU7y1rEZGGi+09bD5hUq7KW6z61DUIlCMYF0Oq0IMs/voQP8zqpDmvSPNJc= hsgahm@ws-ubuntu", - "tags": { - "Name": "kp-bastion-datasaker" - }, - "tags_all": { - "Name": "kp-bastion-datasaker" - } - }, - "sensitive_attributes": [], - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjEifQ==", - "create_before_destroy": true - } - ] - }, - { - "mode": "managed", - "type": "aws_launch_template", - "name": "lt-dmz-bastion-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:launch-template/lt-0e09e47760cf44939", - "block_device_mappings": [ - { - "device_name": "/dev/xvda", - "ebs": [ - { - "delete_on_termination": "true", - "encrypted": "true", - "iops": 3000, - "kms_key_id": "", - "snapshot_id": "", - "throughput": 125, - "volume_size": 20, - "volume_type": "gp3" - } - ], - "no_device": "", - "virtual_name": "" - } - ], - "capacity_reservation_specification": [], - "cpu_options": [], - "credit_specification": [], - "default_version": 1, - "description": "", - "disable_api_stop": false, - "disable_api_termination": false, - "ebs_optimized": "", - "elastic_gpu_specifications": [], - "elastic_inference_accelerator": [], - "enclave_options": [], - "hibernation_options": [], - "iam_instance_profile": [], - "id": "lt-0e09e47760cf44939", - "image_id": "ami-0ea5eb4b05645aa8a", - "instance_initiated_shutdown_behavior": "", - "instance_market_options": [], - "instance_requirements": [], - "instance_type": "t3a.small", - "kernel_id": "", - "key_name": "kp-bastion-datasaker", - "latest_version": 4, - "license_specification": [], - "maintenance_options": [], - "metadata_options": [ - { - "http_endpoint": "enabled", - 
"http_protocol_ipv6": "disabled", - "http_put_response_hop_limit": 3, - "http_tokens": "required", - "instance_metadata_tags": "disabled" - } - ], - "monitoring": [ - { - "enabled": false - } - ], - "name": "lt-dmz-bastion-datasaker", - "name_prefix": "", - "network_interfaces": [ - { - "associate_carrier_ip_address": "", - "associate_public_ip_address": "true", - "delete_on_termination": "true", - "description": "", - "device_index": 0, - "interface_type": "", - "ipv4_address_count": 0, - "ipv4_addresses": [], - "ipv4_prefix_count": 0, - "ipv4_prefixes": [], - "ipv6_address_count": 0, - "ipv6_addresses": [], - "ipv6_prefix_count": 0, - "ipv6_prefixes": [], - "network_card_index": 0, - "network_interface_id": "", - "private_ip_address": "", - "security_groups": [ - "sg-0943a344944f5ed76" - ], - "subnet_id": "" - } - ], - "placement": [], - "private_dns_name_options": [], - "ram_disk_id": "", - "security_group_names": [], - "tag_specifications": [], - "tags": { - "Name": "lt-dmz-bastion-datasaker" - }, - "tags_all": { - "Name": "lt-dmz-bastion-datasaker" - }, - "update_default_version": null, - "user_data": "", - "vpc_security_group_ids": [] - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_key_pair.kp-bastion-datasaker", - "aws_security_group.sg-dmz-datasaker" - ], - "create_before_destroy": true - } - ] - }, - { - "mode": "managed", - "type": "aws_nat_gateway", - "name": "natgw-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "allocation_id": "eipalloc-0136737cbb023c71d", - "connectivity_type": "public", - "id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "eni-052f88aeb468c6ae6", - "private_ip": "172.21.0.12", - "public_ip": "43.200.251.68", - "subnet_id": "subnet-0d762a41fb41d63e5", - "tags": { - "Name": "natgw-datasaker" - }, - "tags_all": { - "Name": "natgw-datasaker" - } - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_eip.eip-natgw-datasaker", - "aws_internet_gateway.igw-datasaker", - "aws_subnet.sbn-dmz-a", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route", - "name": "r-0-0-0-0--0", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "carrier_gateway_id": "", - "core_network_arn": "", - "destination_cidr_block": "0.0.0.0/0", - "destination_ipv6_cidr_block": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "id": "r-rtb-062998ea429f0b4a31080289494", - "instance_id": "", - "instance_owner_id": "", - "local_gateway_id": "", - "nat_gateway_id": "", - "network_interface_id": "", - "origin": "CreateRoute", - "route_table_id": "rtb-062998ea429f0b4a3", - "state": "active", - "timeouts": null, - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_internet_gateway.igw-datasaker", - "aws_route_table.rt-datasaker-pub", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route", - "name": "r-__--0", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "carrier_gateway_id": "", - 
"core_network_arn": "", - "destination_cidr_block": "", - "destination_ipv6_cidr_block": "::/0", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "id": "r-rtb-062998ea429f0b4a32750132062", - "instance_id": "", - "instance_owner_id": "", - "local_gateway_id": "", - "nat_gateway_id": "", - "network_interface_id": "", - "origin": "CreateRoute", - "route_table_id": "rtb-062998ea429f0b4a3", - "state": "active", - "timeouts": null, - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_internet_gateway.igw-datasaker", - "aws_route_table.rt-datasaker-pub", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route", - "name": "route-private-rt-datasaker-dev-0-0-0-0--0", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "carrier_gateway_id": "", - "core_network_arn": "", - "destination_cidr_block": "0.0.0.0/0", - "destination_ipv6_cidr_block": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "", - "id": "r-rtb-0beefbb491d9651011080289494", - "instance_id": "", - "instance_owner_id": "", - "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "", - "origin": "CreateRoute", - "route_table_id": "rtb-0beefbb491d965101", - "state": "active", - "timeouts": null, - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_eip.eip-natgw-datasaker", - "aws_internet_gateway.igw-datasaker", - "aws_nat_gateway.natgw-datasaker", - "aws_route_table.rt-datasaker-dev", - "aws_subnet.sbn-dmz-a", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route", - "name": "route-private-rt-datasaker-iac-0-0-0-0--0", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "carrier_gateway_id": "", - "core_network_arn": "", - "destination_cidr_block": "0.0.0.0/0", - "destination_ipv6_cidr_block": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "", - "id": "r-rtb-0afbb9291fd78c4601080289494", - "instance_id": "", - "instance_owner_id": "", - "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "", - "origin": "CreateRoute", - "route_table_id": "rtb-0afbb9291fd78c460", - "state": "active", - "timeouts": null, - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_eip.eip-natgw-datasaker", - "aws_internet_gateway.igw-datasaker", - "aws_nat_gateway.natgw-datasaker", - "aws_route_table.rt-datasaker-iac", - "aws_subnet.sbn-dmz-a", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table", - "name": 
"rt-datasaker-dev", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-0beefbb491d965101", - "id": "rtb-0beefbb491d965101", - "owner_id": "508259851457", - "propagating_vgws": [], - "route": [ - { - "carrier_gateway_id": "", - "cidr_block": "0.0.0.0/0", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "", - "instance_id": "", - "ipv6_cidr_block": "", - "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "", - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - } - ], - "tags": { - "Name": "rt-datasaker-dev" - }, - "tags_all": { - "Name": "rt-datasaker-dev" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table", - "name": "rt-datasaker-iac", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-0afbb9291fd78c460", - "id": "rtb-0afbb9291fd78c460", - "owner_id": "508259851457", - "propagating_vgws": [], - "route": [ - { - "carrier_gateway_id": "", - "cidr_block": "0.0.0.0/0", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "", - "instance_id": "", - "ipv6_cidr_block": "", - "local_gateway_id": "", - "nat_gateway_id": "nat-0b7f00b0dad0aebc2", - "network_interface_id": "", - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - } - ], - "tags": { - "Name": "rt-datasaker-iac" - }, - "tags_all": { - "Name": "rt-datasaker-iac" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table", - "name": "rt-datasaker-pub", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-062998ea429f0b4a3", - "id": "rtb-062998ea429f0b4a3", - "owner_id": "508259851457", - "propagating_vgws": [], - "route": [ - { - "carrier_gateway_id": "", - "cidr_block": "", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "instance_id": "", - "ipv6_cidr_block": "::/0", - "local_gateway_id": "", - "nat_gateway_id": "", - "network_interface_id": "", - "transit_gateway_id": "", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - }, - { - "carrier_gateway_id": "", - "cidr_block": "0.0.0.0/0", - "core_network_arn": "", - "destination_prefix_list_id": "", - "egress_only_gateway_id": "", - "gateway_id": "igw-022757e5d9d9b36da", - "instance_id": "", - "ipv6_cidr_block": "", - "local_gateway_id": "", - "nat_gateway_id": "", - "network_interface_id": "", - "transit_gateway_id": 
"", - "vpc_endpoint_id": "", - "vpc_peering_connection_id": "" - } - ], - "tags": { - "Name": "rt-datasaker-pub" - }, - "tags_all": { - "Name": "rt-datasaker-pub" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsImRlbGV0ZSI6MzAwMDAwMDAwMDAwLCJ1cGRhdGUiOjEyMDAwMDAwMDAwMH19", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-dev-a", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0627f343c27b3fb15", - "route_table_id": "rtb-0beefbb491d965101", - "subnet_id": "subnet-021536c4f12971c74" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-dev", - "aws_subnet.sbn-dev-a", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-dev-b", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0c4078a79542dfedd", - "route_table_id": "rtb-0beefbb491d965101", - "subnet_id": "subnet-0c90842daa15aa7c7" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-dev", - "aws_subnet.sbn-dev-b", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-dev-c", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0d2d78c9a5732501f", - "route_table_id": "rtb-0beefbb491d965101", - "subnet_id": "subnet-0ae3ab7ae241fe761" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-dev", - "aws_subnet.sbn-dev-c", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-dmz-a", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0cccbbf9004f3431b", - "route_table_id": "rtb-062998ea429f0b4a3", - "subnet_id": "subnet-0d762a41fb41d63e5" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-pub", - "aws_subnet.sbn-dmz-a", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-dmz-b", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0c00a25a4324cf2f9", - "route_table_id": "rtb-062998ea429f0b4a3", - "subnet_id": "subnet-0b4f418020349fb84" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-pub", - "aws_subnet.sbn-dmz-b", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-dmz-c", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0e6a01d3bbf0f41e3", - "route_table_id": "rtb-062998ea429f0b4a3", 
- "subnet_id": "subnet-05b9f4f02955c3307" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-pub", - "aws_subnet.sbn-dmz-c", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-iac-a", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0fa9b2133e43a0469", - "route_table_id": "rtb-0afbb9291fd78c460", - "subnet_id": "subnet-098225ee426615f0a" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-iac", - "aws_subnet.sbn-iac-a", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-iac-b", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-00541f9ae239c0a01", - "route_table_id": "rtb-0afbb9291fd78c460", - "subnet_id": "subnet-04321c9a5150c8317" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-iac", - "aws_subnet.sbn-iac-b", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_route_table_association", - "name": "rta-iac-c", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "gateway_id": "", - "id": "rtbassoc-0d495c6189944030b", - "route_table_id": "rtb-0afbb9291fd78c460", - "subnet_id": "subnet-0881ae89f2b5c3cbd" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_route_table.rt-datasaker-iac", - "aws_subnet.sbn-iac-c", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group", - "name": "sg-dev-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-00be91bb5d8d3662e", - "description": "Security group dev-datasaker", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 65535 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "id": "sg-00be91bb5d8d3662e", - "ingress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 22 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "name": "secg-dev-datasaker", - "name_prefix": "", - "owner_id": "508259851457", - "revoke_rules_on_delete": false, - "tags": { - "Name": "sg-dev-datasaker" - }, - "tags_all": { - "Name": "sg-dev-datasaker" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group", - "name": "sg-dmz-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-0943a344944f5ed76", - "description": "Security group dmz-datasaker", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 65535 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "id": "sg-0943a344944f5ed76", - "ingress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 22 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "name": "secg-dmz-datasaker", - "name_prefix": "", - "owner_id": "508259851457", - "revoke_rules_on_delete": false, - "tags": { - "Name": "sg-dmz-datasaker" - }, - "tags_all": { - "Name": "sg-dmz-datasaker" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ], - "create_before_destroy": true - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group", - "name": "sg-iac-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-0bd4433b800af1c07", - "description": "Security group iac-datasaker", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 65535 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 8 - } - ], - "id": "sg-0bd4433b800af1c07", - "ingress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 22 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 0 - } - ], - "name": "secg-iac-datasaker", - "name_prefix": "", - "owner_id": "508259851457", - "revoke_rules_on_delete": false, - "tags": { - "Name": "sg-iac-datasaker" - }, - "tags_all": { - "Name": 
"sg-iac-datasaker" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-engress-tcp-all-dev-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "id": "sgrule-2827480756", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "tcp", - "security_group_id": "sg-00be91bb5d8d3662e", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 65535, - "type": "egress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dev-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-engress-tcp-all-dmz-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "id": "sgrule-1335572876", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "tcp", - "security_group_id": "sg-0943a344944f5ed76", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 65535, - "type": "egress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dmz-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-engress-tcp-all-iac-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 0, - "id": "sgrule-2287612910", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "tcp", - "security_group_id": "sg-0bd4433b800af1c07", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 65535, - "type": "egress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-iac-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-ingress-icmp-dev-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "id": "sgrule-2425217960", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "icmp", - "security_group_id": "sg-00be91bb5d8d3662e", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 8, - "type": "ingress" - }, - 
"sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dev-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-ingress-icmp-dmz-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "id": "sgrule-2006004880", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "icmp", - "security_group_id": "sg-0943a344944f5ed76", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 8, - "type": "ingress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dmz-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-ingress-icmp-iac-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "id": "sgrule-2269989047", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "icmp", - "security_group_id": "sg-0bd4433b800af1c07", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 0, - "type": "ingress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-iac-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dev-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "id": "sgrule-4048334944", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "tcp", - "security_group_id": "sg-00be91bb5d8d3662e", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 22, - "type": "ingress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dev-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dmz-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "id": "sgrule-3651736617", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "tcp", - "security_group_id": "sg-0943a344944f5ed76", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 22, - "type": "ingress" - }, - "sensitive_attributes": [], - "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dmz-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-from-0-0-0-0--0-ingress-tcp-22to22-iac-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 22, - "id": "sgrule-2060637527", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "tcp", - "security_group_id": "sg-0bd4433b800af1c07", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 22, - "type": "ingress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-iac-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-to-0-0-0-0--0-egress-icmp-dev-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "id": "sgrule-942255193", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "icmp", - "security_group_id": "sg-00be91bb5d8d3662e", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 8, - "type": "egress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dev-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-to-0-0-0-0--0-egress-icmp-dmz-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "id": "sgrule-2631111511", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "icmp", - "security_group_id": "sg-0943a344944f5ed76", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 8, - "type": "egress" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-dmz-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_security_group_rule", - "name": "sgr-to-0-0-0-0--0-egress-icmp-iac-datasaker-io", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "", - "from_port": 8, - "id": "sgrule-2322679084", - "ipv6_cidr_blocks": null, - "prefix_list_ids": null, - "protocol": "icmp", - "security_group_id": "sg-0bd4433b800af1c07", - "self": false, - "source_security_group_id": null, - "timeouts": null, - "to_port": 8, - "type": "egress" - }, - "sensitive_attributes": [], - "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9LCJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "dependencies": [ - "aws_security_group.sg-iac-datasaker", - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-dev-a", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-021536c4f12971c74", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2a", - "availability_zone_id": "apne2-az1", - "cidr_block": "172.21.1.0/24", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-021536c4f12971c74", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-dev-a.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-dev-a.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-dev-b", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0c90842daa15aa7c7", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2b", - "availability_zone_id": "apne2-az2", - "cidr_block": "172.21.2.0/24", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0c90842daa15aa7c7", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-dev-b.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-dev-b.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": 
"aws_subnet", - "name": "sbn-dev-c", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0ae3ab7ae241fe761", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2c", - "availability_zone_id": "apne2-az3", - "cidr_block": "172.21.3.0/24", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0ae3ab7ae241fe761", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-dev-c.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-dev-c.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-dmz-a", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0d762a41fb41d63e5", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2a", - "availability_zone_id": "apne2-az1", - "cidr_block": "172.21.0.0/28", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0d762a41fb41d63e5", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-dmz-a.datasaker", - "SubnetType": "Public", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-dmz-a.datasaker", - "SubnetType": "Public", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-dmz-b", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0b4f418020349fb84", - 
"assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2b", - "availability_zone_id": "apne2-az2", - "cidr_block": "172.21.0.16/28", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0b4f418020349fb84", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-dmz-b.datasaker", - "SubnetType": "Public", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-dmz-b.datasaker", - "SubnetType": "Public", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-dmz-c", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-05b9f4f02955c3307", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2c", - "availability_zone_id": "apne2-az3", - "cidr_block": "172.21.0.32/28", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-05b9f4f02955c3307", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-dmz-c.datasaker", - "SubnetType": "Public", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-dmz-c.datasaker", - "SubnetType": "Public", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-iac-a", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-098225ee426615f0a", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2a", - "availability_zone_id": "apne2-az1", - "cidr_block": "172.21.4.0/24", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, 
- "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-098225ee426615f0a", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-iac-a.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-iac-a.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-iac-b", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-04321c9a5150c8317", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2b", - "availability_zone_id": "apne2-az2", - "cidr_block": "172.21.5.0/24", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-04321c9a5150c8317", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - "outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-iac-b.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-iac-b.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_subnet", - "name": "sbn-iac-c", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0881ae89f2b5c3cbd", - "assign_ipv6_address_on_creation": false, - "availability_zone": "ap-northeast-2c", - "availability_zone_id": "apne2-az3", - "cidr_block": "172.21.6.0/24", - "customer_owned_ipv4_pool": "", - "enable_dns64": false, - "enable_resource_name_dns_a_record_on_launch": true, - "enable_resource_name_dns_aaaa_record_on_launch": false, - "id": "subnet-0881ae89f2b5c3cbd", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "ipv6_native": false, - "map_customer_owned_ip_on_launch": false, - "map_public_ip_on_launch": false, - 
"outpost_arn": "", - "owner_id": "508259851457", - "private_dns_hostname_type_on_launch": "resource-name", - "tags": { - "Name": "sbn-iac-c.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "tags_all": { - "Name": "sbn-iac-c.datasaker", - "SubnetType": "Private", - "kubernetes.io/cluster/datasaker": "owned", - "kubernetes.io/role/elb": "1", - "kubernetes.io/role/internal-elb": "1" - }, - "timeouts": null, - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", - "dependencies": [ - "aws_vpc.vpc-datasaker" - ] - } - ] - }, - { - "mode": "managed", - "type": "aws_vpc", - "name": "vpc-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:vpc/vpc-03cbb88e181ccb46e", - "assign_generated_ipv6_cidr_block": true, - "cidr_block": "172.21.0.0/16", - "default_network_acl_id": "acl-065f8bec9dbad2106", - "default_route_table_id": "rtb-01a448dbcf55051bf", - "default_security_group_id": "sg-0d13c39fc35a8585c", - "dhcp_options_id": "dopt-03658cf6324535d26", - "enable_classiclink": false, - "enable_classiclink_dns_support": false, - "enable_dns_hostnames": true, - "enable_dns_support": true, - "id": "vpc-03cbb88e181ccb46e", - "instance_tenancy": "default", - "ipv4_ipam_pool_id": null, - "ipv4_netmask_length": null, - "ipv6_association_id": "vpc-cidr-assoc-0122be3d12ff2da6f", - "ipv6_cidr_block": "2406:da12:f6a:f500::/56", - "ipv6_cidr_block_network_border_group": "ap-northeast-2", - "ipv6_ipam_pool_id": "", - "ipv6_netmask_length": 0, - "main_route_table_id": "rtb-01a448dbcf55051bf", - "owner_id": "508259851457", - "tags": { - "Name": "vpc-datasaker" - }, - "tags_all": { - "Name": "vpc-datasaker" - } - }, - "sensitive_attributes": [], - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjEifQ==", - "create_before_destroy": true - } - ] - }, - { - "mode": "managed", - "type": "aws_vpc_dhcp_options", - "name": "vpc-dhcp-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:ec2:ap-northeast-2:508259851457:dhcp-options/dopt-03658cf6324535d26", - "domain_name": "ap-northeast-2.compute.internal", - "domain_name_servers": [ - "AmazonProvidedDNS" - ], - "id": "dopt-03658cf6324535d26", - "netbios_name_servers": [], - "netbios_node_type": "", - "ntp_servers": [], - "owner_id": "508259851457", - "tags": { - "Name": "vpc-dhcp-datasaker" - }, - "tags_all": { - "Name": "vpc-dhcp-datasaker" - } - }, - "sensitive_attributes": [], - "private": "bnVsbA==" - } - ] - }, - { - "mode": "managed", - "type": "aws_vpc_dhcp_options_association", - "name": "vpc-dhcp-asso-datasaker", - "provider": "provider[\"registry.terraform.io/hashicorp/aws\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "dhcp_options_id": "dopt-03658cf6324535d26", - "id": "dopt-03658cf6324535d26-vpc-03cbb88e181ccb46e", - "vpc_id": "vpc-03cbb88e181ccb46e" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "aws_vpc.vpc-datasaker", - "aws_vpc_dhcp_options.vpc-dhcp-datasaker" - ] - } - ] - } - ] + "outputs": {}, + "resources": [] } diff --git 
a/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_masters.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_masters.dev.datasaker.io_policy new file mode 100644 index 0000000..9f31f33 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_masters.dev.datasaker.io_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_nodes.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_nodes.dev.datasaker.io_policy new file mode 100644 index 0000000..9f31f33 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_nodes.dev.datasaker.io_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_masters.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_masters.dev.datasaker.io_policy new file mode 100644 index 0000000..7a23370 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_masters.dev.datasaker.io_policy @@ -0,0 +1,273 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::hostedzone/Z072735718G25WNVKU834" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*" + ] + 
}, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifyVolume", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:DescribeRepositories", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:ListImages", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + 
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy new file mode 100644 index 0000000..aa71a3a --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy @@ -0,0 +1,50 @@ +{ + "Statement": [ + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/addons/*", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/cluster-completed.spec", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/igconfig/node/*", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/secrets/dockerconfig" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:ModifyNetworkInterfaceAttribute", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:DescribeRepositories", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:ListImages", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key b/terraform/tf-kops-dev-20200916-ip/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key new file mode 100644 index 0000000..b10a93b --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCyfTPnCyr0Typ7yGTcy0LEGa8IH8yESEXa4Qyr85dWrxazTnWO7iYS0Ze6L0GMMO5qZXg/ntJGhI4PYF/WbCZ5KZMRXePyQIVs5pKMvSX4yH2gPIET5c6yTg4ZSIqrZDLBXGEZxMVp/SnNx1tRzxi0plBDtguSy6LZD0C1ue+VeT4oO98EB2T01GOeQp+RlF/theZuEWSWOVfFD0qVdsHIwVlYYlEZR11IrTamabMOVzyw+/8cokA4hgsrrkSrpKQ2YW0evHK1pxZrw+i3YJuHh3hJ0h98Ymw3rpHGec59gXaYT0PQEQvZs9RCrYw8NpCTQrImXR1UVjeeY3KGgpYQXna+WAmkjA+K/JvLmHGeombVJyd3v8330FX+Ob9klgqTWFvwb8Ew4QCcfl5hDAWxvzoJKAoG/TAZd13aNYaZAVkeWB7vPFWZ0brea6sqUJzXqzPwUXa0OirnqEfxMLZoo4tFyfxuVYVK+ScxayBPYJQkhwmTAZ4bj0OfQEw/jJM= hsgahm@ws-ubuntu diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data 
b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..36a3c01 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-a +InstanceGroupRole: Node +NodeupConfigHash: jyt+itIoHkfChG5oykaR/YcW2X+YK02YqH7IwlOP474= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..5612062 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-b +InstanceGroupRole: Node +NodeupConfigHash: F10MZ5YMtLK1UChahPw/MwMFfjLrY81DKA4nft2Tobk= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..024a1e9 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-c +InstanceGroupRole: Node +NodeupConfigHash: fEdAb1pHGvBokNYyHZ4CzDj3eq1vsZxS5FrjEUayRuU= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..dae2b69 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-mgmt-a +InstanceGroupRole: Node +NodeupConfigHash: oZQY/P4yvbXnh4dW93Et8YpN0q6liFWsIMAyny6862g= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..8bb061f --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-mgmt-b +InstanceGroupRole: Node +NodeupConfigHash: oc7Bss3+h8wRUqWSY05NxslVT4WbcTxzvi5KtLp7vuw= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..7fc0ebc --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-a +InstanceGroupRole: Node +NodeupConfigHash: YzHBVETSqynzG1++32lK6kNelMH04Gx2UDgb7bJWVm8= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..82cce5d --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-b +InstanceGroupRole: Node +NodeupConfigHash: RcLvuahs6C2C746ouG575y7zIBPE/45aLDopp3qLKak= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..18a15fd --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data @@ -0,0 +1,175 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-c +InstanceGroupRole: Node +NodeupConfigHash: GZFMJ+HtfNFNr+OV9OCtF2wJLZDODBwV/NFLgSCHB2I= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..24d409c --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data @@ -0,0 +1,275 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + 
+NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: 
registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2a +InstanceGroupRole: Master +NodeupConfigHash: bFvgCW9ijGRs5u8kNAX/s53tD3afsvYDdJVNW1Kq5OY= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..acc4bba --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data @@ -0,0 +1,275 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export 
AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + 
cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 
10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2b +InstanceGroupRole: Master +NodeupConfigHash: 12BbVAVTnRcOLqha45NC0eii/lUhVtoQrIYpccKF/lQ= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..aa9efdc --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data @@ -0,0 +1,275 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9 + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.6.6 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: 
registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2c +InstanceGroupRole: Master +NodeupConfigHash: 6HuG0yYyZf5DLo50saQaB9ApKbrna49ygtHGjkyb/l4= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_cluster-completed.spec_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000..3eb6a6c --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,251 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2022-09-13T04:27:37Z" + name: dev.datasaker.io +spec: + api: + loadBalancer: + class: Classic + type: Public + authorization: + rbac: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io + configStore: s3://clusters.dev.datasaker.io/dev.datasaker.io + containerRuntime: containerd + containerd: + logLevel: info + version: 1.6.6 + dnsZone: Z072735718G25WNVKU834 + docker: + skipInstall: true + 
etcdClusters: + - backups: + backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: main + version: 3.5.4 + - backups: + backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: events + version: 3.5.4 + externalDns: + provider: dns-controller + iam: + allowContainerRegistry: true + legacy: false + keyStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + enableLeaderMigration: true + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.21.3 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a + logLevel: 2 + kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e + leaderElection: + leaderElect: true + logLevel: 2 + 
kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.23.10 + masterInternalName: api.internal.dev.datasaker.io + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.dev.datasaker.io + networkCIDR: 172.21.0.0/16 + networkID: vpc-0b6e0b906c678a22f + networking: + calico: + encapsulationMode: ipip + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 172.21.8.0/23 + id: subnet-0c875e254456809f7 + name: ap-northeast-2a + type: Private + zone: ap-northeast-2a + - cidr: 172.21.10.0/23 + id: subnet-05672a669943fc12f + name: ap-northeast-2b + type: Private + zone: ap-northeast-2b + - cidr: 172.21.12.0/23 + id: subnet-0940fd78504acbbde + name: ap-northeast-2c + type: Private + zone: ap-northeast-2c + - cidr: 172.21.0.0/24 + id: subnet-0de55619bee2411f8 + name: utility-ap-northeast-2a + type: Utility + zone: ap-northeast-2a + - cidr: 172.21.1.0/24 + id: subnet-0a5d787353f874684 + name: utility-ap-northeast-2b + type: Utility + zone: ap-northeast-2b + - cidr: 172.21.2.0/24 + id: subnet-0ee26ffc561efb292 + name: utility-ap-northeast-2c + type: Utility + zone: ap-northeast-2c + topology: + dns: + type: Public + masters: private + nodes: private diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content new file mode 100644 index 0000000..3a2e037 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content @@ -0,0 +1,792 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: 
aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-attacher-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - csi.storage.k8s.io + resources: + - csinodeinfos + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-provisioner-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - delete +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-resizer-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + 
resources: + - pods + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-snapshotter-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-attacher-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-attacher-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-provisioner-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-provisioner-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-resizer-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-resizer-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-snapshotter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-snapshotter-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + 
+--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-getter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-csi-node-role +subjects: +- kind: ServiceAccount + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-role +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-node + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - node + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=2 + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: kubelet-dir + - mountPath: /csi + name: plugin-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock + name: 
node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02 + imagePullPolicy: IfNotPresent + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: ebs-csi-node-sa + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet + type: Directory + name: kubelet-dir + - hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /dev + type: Directory + name: device-dir + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + kops.k8s.io/managed-by: kops + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - controller + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --k8s-tag-cluster-id=dev.datasaker.io + - --extra-tags=KubernetesCluster=dev.datasaker.io + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: key_id + name: aws-secret + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: access_key + name: aws-secret + optional: true + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + - --feature-gates=Topology=true + - --extra-create-metadata + - --leader-election=true + - 
--default-fstype=ext4 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 + imagePullPolicy: IfNotPresent + name: csi-provisioner + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b + imagePullPolicy: IfNotPresent + name: csi-attacher + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 + imagePullPolicy: IfNotPresent + name: csi-resizer + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02 + imagePullPolicy: IfNotPresent + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccountName: ebs-csi-controller-sa + tolerations: + - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + volumes: + - emptyDir: {} + name: socket-dir + +--- + +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs.csi.aws.com +spec: + attachRequired: true + podInfoOnMount: false + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content new file mode 100644 index 0000000..9149730 --- /dev/null +++ 
b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content @@ -0,0 +1,69 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 530752f323a7573cedaa993ac169181c2d36d70e1cb4950d3c1a3347ac586826 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: 1060dbbcbf4f9768081b838e619da1fc3970ef2b86886f8e5c6ff3e2842c2aa3 + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - id: k8s-1.23 + manifest: leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml + manifestHash: b9c91e09c0f28c9b74ff140b8395d611834c627d698846d625c10975a74a48c4 + name: leader-migration.rbac.addons.k8s.io + selector: + k8s-addon: leader-migration.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 3e67c5934d55a5f5ebbd8a97e428aa6d9749812ba209a3dc1f1cb9449ee75c26 + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 + - id: k8s-1.22 + manifest: networking.projectcalico.org/k8s-1.22.yaml + manifestHash: 94e23c0a435bb93ebb2271d4352bd25a98b8d84064a40a1ff2077111cfe6dc44 + name: networking.projectcalico.org + selector: + role.kubernetes.io/networking: "1" + version: 9.99.0 + - id: k8s-1.17 + manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml + manifestHash: 80c38e6bb751e5c9e58a013b9c09b70d0ca34383d15889e09df214090c52713c + name: aws-ebs-csi-driver.addons.k8s.io + selector: + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + version: 9.99.0 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000..8e33a3a --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,385 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + 
kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.8.6@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: 
config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: 
system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000..d8dd7bc --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,140 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.24.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.24.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z072735718G25WNVKU834 + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.24.1@sha256:d0bff3dff30ec695702eb954b7568e3b5aa164f458a70be1d3f5194423ef90a6 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000..7f1e62c --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"s3://clusters.dev.datasaker.io/dev.datasaker.io","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.dev.datasaker.io"],"Region":"ap-northeast-2"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.24.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.dev.datasaker.io + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.24.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.24.1@sha256:dec29a983e633e2d3321fef86e6fea211784b2dc9b62ce735d708e781ef4919c + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + 
volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000..0cde75e --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api \ No newline 
at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content new file mode 100644 index 0000000..86d68c7 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content @@ -0,0 +1,52 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: leader-migration.rbac.addons.k8s.io + name: system::leader-locking-migration + namespace: kube-system +rules: +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-provider-extraction-migration + resources: + - leases + verbs: + - create + - list + - get + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: leader-migration.rbac.addons.k8s.io + name: system::leader-locking-migration + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: system::leader-locking-migration +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:kube-controller-manager +- kind: ServiceAccount + name: kube-controller-manager + namespace: kube-system +- kind: ServiceAccount + name: aws-cloud-controller-manager + namespace: kube-system +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000..502c682 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content new file mode 100644 index 0000000..116baaa --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content @@ -0,0 +1,4778 @@ +apiVersion: v1 +data: + calico_backend: bird + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "assign_ipv4": "true", + "assign_ipv6": "false", + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": 
"__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + typha_service_name: none + veth_mtu: "0" +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-config + namespace: kube-system + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. 
When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. 
+ properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. + format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. 
If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. 
+ type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. 
+ type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. 
+ type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. 
+ type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. 
[Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all interfaces with + BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled or Strict. [Default: Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing intepreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. 
+ type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix''s (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s]' + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. 
By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + floatingIPs: + default: Disabled + description: FloatingIPs configures whether or not Felix will program + floating IP addresses. + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. 
The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. 
[Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' 
+ type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. 
[Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for VXLAN networking. Optional as Felix determines + this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled. + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. 
[Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the Wireguard interface. [Default: wg.calico]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by Wireguard. [Default: 51820]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the Wireguard interface. + See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. 
Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. 
This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. 
Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+ One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. 
This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. 
+ type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. 
+ If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: Affinity of the block, if this block has one. If set, + it will be of the form "host:". If not set, this block + is not affine to a host. + type: string + allocations: + description: Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + nullable: true + type: integer + type: array + attributes: + description: Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: Deleted is an internal boolean used to workaround a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block. 
+ items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. 
+ properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' 
+ type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. 
[Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. 
[Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: + 9094]' + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. 
\n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. 
If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. 
Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". 
+ \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR''d together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). 
\n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. 
When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. 
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ipreservations + verbs: + - list +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ippools + verbs: + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete +- apiGroups: + - crd.projectcalico.org + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - kubecontrollersconfigurations + verbs: + - get + - create + - update + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - watch + - list +- apiGroups: + - "" + resources: + - endpoints + - services + verbs: + - watch + - list + - get +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - watch + - list +- apiGroups: + - "" + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - patch +- apiGroups: + - "" + resourceNames: + - calico-node + resources: + - serviceaccounts/token + verbs: + - create +- apiGroups: + - crd.projectcalico.org + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update +- apiGroups: + - crd.projectcalico.org + resources: + 
- caliconodestatuses + verbs: + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete +- apiGroups: + - crd.projectcalico.org + resources: + - ipamconfigs + verbs: + - get +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + verbs: + - watch +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-node + role.kubernetes.io/networking: "1" + name: calico-node + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + creationTimestamp: null + labels: + k8s-app: calico-node + kops.k8s.io/managed-by: kops + spec: + containers: + - env: + - name: DATASTORE_TYPE + value: kubernetes + - name: WAIT_FOR_DATASTORE + value: "true" + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + key: calico_backend + name: calico-config + - name: CLUSTER_TYPE + value: kops,bgp + - name: IP + value: autodetect + - name: IP6 + value: none + - name: IP_AUTODETECTION_METHOD + value: first-found + - name: IP6_AUTODETECTION_METHOD + value: none + - name: CALICO_IPV4POOL_IPIP + value: CrossSubnet + - name: CALICO_IPV4POOL_VXLAN + value: Never + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: CALICO_IPV4POOL_CIDR + value: 100.96.0.0/11 + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: ACCEPT + - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + - name: FELIX_AWSSRCDSTCHECK + value: Disable + - name: FELIX_BPFENABLED + value: "false" + - name: FELIX_BPFEXTERNALSERVICEMODE + value: Tunnel + - name: FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED + value: "false" + - name: FELIX_BPFLOGLEVEL + value: "Off" + - name: FELIX_CHAININSERTMODE + value: insert + - name: FELIX_IPTABLESBACKEND + value: Auto + - name: FELIX_LOGSEVERITYSCREEN + value: info + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "false" + - name: FELIX_PROMETHEUSMETRICSPORT + value: "9091" + - name: FELIX_PROMETHEUSGOMETRICSENABLED + value: "false" + - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED + value: "false" + - name: FELIX_WIREGUARDENABLED + value: "false" + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: 
docker.io/calico/node:v3.23.3@sha256:b356c2334729810de4781819ac7cf7cb05e49b8be9387e6bba2755df95d1cd84 + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + failureThreshold: 6 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + name: calico-node + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + resources: + requests: + cpu: 100m + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - mountPath: /var/run/nodeagent + name: policysync + - mountPath: /sys/fs/bpf + name: bpffs + - mountPath: /var/log/calico/cni + name: cni-log-dir + readOnly: true + hostNetwork: true + initContainers: + - command: + - /opt/cni/bin/calico-ipam + - -upgrade + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + key: calico_backend + name: calico-config + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: docker.io/calico/cni:v3.23.3@sha256:83db083069fc8612798feda6d9c3413f075ec44e29d302f3af0a11df1cef5823 + name: upgrade-ipam + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - command: + - /opt/cni/bin/install + env: + - name: CNI_CONF_NAME + value: 10-calico.conflist + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + key: cni_network_config + name: calico-config + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CNI_MTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: SLEEP + value: "false" + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: docker.io/calico/cni:v3.23.3@sha256:83db083069fc8612798feda6d9c3413f075ec44e29d302f3af0a11df1cef5823 + name: install-cni + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - command: + - calico-node + - -init + - -best-effort + image: docker.io/calico/node:v3.23.3@sha256:b356c2334729810de4781819ac7cf7cb05e49b8be9387e6bba2755df95d1cd84 + name: mount-bpffs + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys/fs + mountPropagation: Bidirectional + name: sys-fs + - mountPath: /var/run/calico + mountPropagation: Bidirectional + name: var-run-calico + - mountPath: /nodeproc + name: nodeproc + readOnly: true + - command: + - sh + - -c + - echo Temporary fix to avoid server side apply issues + image: busybox@sha256:ad9bd57a3a57cc95515c537b89aaa69d83a6df54c4050fcf2b41ad367bec0cd5 + name: flexvol-driver + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: calico-node + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - 
hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /var/run/calico + name: var-run-calico + - hostPath: + path: /var/lib/calico + name: var-lib-calico + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + name: sys-fs + - hostPath: + path: /sys/fs/bpf + type: Directory + name: bpffs + - hostPath: + path: /proc + name: nodeproc + - hostPath: + path: /opt/cni/bin + name: cni-bin-dir + - hostPath: + path: /etc/cni/net.d + name: cni-net-dir + - hostPath: + path: /var/log/calico/cni + name: cni-log-dir + - hostPath: + path: /var/lib/cni/networks + name: host-local-net-dir + - hostPath: + path: /var/run/nodeagent + type: DirectoryOrCreate + name: policysync + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: calico-kube-controllers + kops.k8s.io/managed-by: kops + name: calico-kube-controllers + namespace: kube-system + spec: + containers: + - env: + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + image: docker.io/calico/kube-controllers:v3.23.3@sha256:a1773f60d4bb15cbb6d73d2da9e6ab28c36fb863263f87160bf02de3bed43991 + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + failureThreshold: 6 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + name: calico-kube-controllers + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: calico-kube-controllers + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content new 
file mode 100644 index 0000000..4e8a971 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,118 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-csi-1-21 +parameters: + encrypted: "true" + type: gp3 +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-events_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000..c130130 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 3, + "etcdVersion": "3.5.4" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-main_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000..c130130 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 
+1,4 @@ +{ + "memberCount": 3, + "etcdVersion": "3.5.4" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_kops-version.txt_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000..3e940eb --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.24.1 \ No newline at end of file diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-events_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-events_content new file mode 100644 index 0000000..14b3445 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-events_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142 + name: etcd-manager + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-main_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-main_content new file mode 100644 index 0000000..281102f --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-etcdmanager-main_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + 
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000..e6ba6f9 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.24.1@sha256:b969a40a66d7c9781b8f393c4bd1cc90828c45b0419e24bf2192be9a10fd6c44 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-a_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-a_content new file mode 100644 index 0000000..96634c6 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-a_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 
8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-a + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-b_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-b_content new file mode 100644 index 0000000..e499afa --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-b_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - 
c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-b + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: 
registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-c_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-c_content new file mode 100644 index 0000000..ca7dba4 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-data-c_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- 
+ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: data + kops.k8s.io/instancegroup: dev-data-c + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content new file mode 100644 index 0000000..c82c356 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + 
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-a + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content new file mode 100644 index 0000000..2c649ea --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - 
ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-b + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-a_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-a_content new file mode 100644 index 0000000..3eacbc4 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-a_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 
962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-a + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- 
s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-b_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-b_content new file mode 100644 index 0000000..106c085 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-b_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + 
enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-b + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-c_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-c_content new file mode 100644 index 0000000..7bed97f --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-dev-process-c_content @@ -0,0 +1,70 @@ +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + 
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-c + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content new file mode 100644 index 0000000..d561827 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content @@ -0,0 +1,265 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS + UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5 + 
NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec + E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf + DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU + fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp + vwIDAQAB + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + - be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64 + - ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 + - 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64 + - 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX + DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/ + wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG + yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G + VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5 + db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0 + xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt + Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + 
A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA + Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4 + wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr + anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd + wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob + PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O + ILB6F+I/dTp9EdI/qBNrqg== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw + NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4 + L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf + A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1 + ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ + ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe + 1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB + n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8 + sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN + FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa + xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm + eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z + jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN + MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO + uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW + GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm + lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie + tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY + GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ + +QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C + vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp + fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh + xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX + cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB + jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS + XOkBupErXPmZkj/8CEk= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO + 4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F + 5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV + mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn + l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l + /Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + 
BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4 + gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD + qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5 + 9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77 + T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc + zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0 + Z2RjK1J0AFawFQ== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB + dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR + IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4 + 5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ + KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D + WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy + hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy + 55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI + vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n + n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ + 1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0 + u1zeRMpGpRYUtA== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5 + MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod + F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B + Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA + P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I + Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z + 2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi + L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut + ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ + EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5 + ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n + ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp + 1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb + Ge9OTWOZ + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + 
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7142721951056419283723637893" + etcd-clients-ca: "7142721951028761584467841211" + etcd-manager-ca-events: "7142721951057921435241405328" + etcd-manager-ca-main: "7142721951074386633614158554" + etcd-peers-ca-events: "7142721951138880539659455124" + etcd-peers-ca-main: "7142721951056140991529806803" + kubernetes-ca: "7142721951268583043543051771" + service-account: "7142721951191621691964241737" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2a + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content new file mode 100644 index 0000000..53809f3 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content @@ -0,0 +1,265 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: 
registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS + UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5 + NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec + E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf + DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU + fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp + vwIDAQAB + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + - be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64 + - ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 + - 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64 + - 
7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX + DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/ + wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG + yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G + VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5 + db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0 + xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt + Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA + Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4 + wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr + anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd + wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob + PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O + ILB6F+I/dTp9EdI/qBNrqg== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw + NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4 + L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf + A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1 + ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ + ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe + 1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB + n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8 + sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN + FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa + xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm + eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z + jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN + MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO + uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW + GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm + lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie + tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY + GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ + +QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C + vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp + fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh + xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX + 
cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB + jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS + XOkBupErXPmZkj/8CEk= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO + 4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F + 5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV + mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn + l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l + /Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4 + gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD + qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5 + 9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77 + T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc + zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0 + Z2RjK1J0AFawFQ== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB + dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR + IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4 + 5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ + KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D + WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy + hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy + 55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI + vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n + n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ + 1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0 + u1zeRMpGpRYUtA== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5 + MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod + F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B + Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA + P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I + Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z + 2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi + L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut + ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ + EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5 + ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n + 
ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp + 1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb + Ge9OTWOZ + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7142721951056419283723637893" + etcd-clients-ca: "7142721951028761584467841211" + etcd-manager-ca-events: "7142721951057921435241405328" + etcd-manager-ca-main: "7142721951074386633614158554" + etcd-peers-ca-events: "7142721951138880539659455124" + etcd-peers-ca-main: "7142721951056140991529806803" + kubernetes-ca: "7142721951268583043543051771" + service-account: "7142721951191621691964241737" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2b + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content new file mode 100644 index 
0000000..4afc821 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content @@ -0,0 +1,265 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS + UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5 + NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec + E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf + DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU + fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp + vwIDAQAB + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet + - 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz + - 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 + - be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64 + - ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64 + arm64: + - 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet + - d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl + - 
ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz + - 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64 + - 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64 + - 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX + DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/ + wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG + yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G + VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5 + db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0 + xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt + Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA + Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4 + wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr + anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd + wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob + PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O + ILB6F+I/dTp9EdI/qBNrqg== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw + NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4 + L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf + A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1 + ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ + ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe + 1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB + n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8 + sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN + FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa + xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm + eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z + jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN + MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + 
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO + uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW + GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm + lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie + tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY + GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ + +QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C + vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp + fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh + xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX + cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB + jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS + XOkBupErXPmZkj/8CEk= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO + 4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F + 5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV + mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn + l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l + /Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4 + gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD + qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5 + 9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77 + T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc + zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0 + Z2RjK1J0AFawFQ== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB + dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR + IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4 + 5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ + KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D + WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy + hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy + 55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI + vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n + n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ + 1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0 + u1zeRMpGpRYUtA== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5 + MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + 
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod + F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B + Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA + P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I + Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z + 2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi + L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut + ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ + EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5 + ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n + ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp + 1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb + Ge9OTWOZ + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7142721951056419283723637893" + etcd-clients-ca: "7142721951028761584467841211" + etcd-manager-ca-events: "7142721951057921435241405328" + etcd-manager-ca-main: "7142721951074386633614158554" + etcd-peers-ca-events: "7142721951138880539659455124" + etcd-peers-ca-main: "7142721951056140991529806803" + kubernetes-ca: "7142721951268583043543051771" + service-account: "7142721951191621691964241737" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2c + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s 
+ shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.6.6 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/terraform/tf-kops-dev-20200916-ip/kubernetes.tf b/terraform/tf-kops-dev-20200916-ip/kubernetes.tf new file mode 100644 index 0000000..6940d81 --- /dev/null +++ b/terraform/tf-kops-dev-20200916-ip/kubernetes.tf @@ -0,0 +1,2358 @@ +locals { + cluster_name = "dev.datasaker.io" + master_autoscaling_group_ids = [aws_autoscaling_group.master-ap-northeast-2a-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2b-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2c-masters-dev-datasaker-io.id] + master_security_group_ids = [aws_security_group.masters-dev-datasaker-io.id] + masters_role_arn = aws_iam_role.masters-dev-datasaker-io.arn + masters_role_name = aws_iam_role.masters-dev-datasaker-io.name + node_autoscaling_group_ids = [aws_autoscaling_group.dev-data-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-c-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-a-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-a-dev-datasaker-io.id, aws_autoscaling_group.dev-process-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-c-dev-datasaker-io.id] + node_security_group_ids = [aws_security_group.nodes-dev-datasaker-io.id] + node_subnet_ids = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0c875e254456809f7"] + nodes_role_arn = aws_iam_role.nodes-dev-datasaker-io.arn + nodes_role_name = aws_iam_role.nodes-dev-datasaker-io.name + region = "ap-northeast-2" + subnet_ap-northeast-2a_id = "subnet-0c875e254456809f7" + subnet_ap-northeast-2b_id = "subnet-05672a669943fc12f" + subnet_ap-northeast-2c_id = "subnet-0940fd78504acbbde" + subnet_ids = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0a5d787353f874684", "subnet-0c875e254456809f7", "subnet-0de55619bee2411f8", "subnet-0ee26ffc561efb292"] + subnet_utility-ap-northeast-2a_id = "subnet-0de55619bee2411f8" + subnet_utility-ap-northeast-2b_id = "subnet-0a5d787353f874684" + subnet_utility-ap-northeast-2c_id = "subnet-0ee26ffc561efb292" + vpc_id = "vpc-0b6e0b906c678a22f" +} + +output "cluster_name" { + value = "dev.datasaker.io" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-ap-northeast-2a-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2b-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2c-masters-dev-datasaker-io.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-dev-datasaker-io.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-dev-datasaker-io.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-dev-datasaker-io.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.dev-data-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-c-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-a-dev-datasaker-io.id, 
aws_autoscaling_group.dev-mgmt-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-a-dev-datasaker-io.id, aws_autoscaling_group.dev-process-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-c-dev-datasaker-io.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-dev-datasaker-io.id] +} + +output "node_subnet_ids" { + value = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0c875e254456809f7"] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-dev-datasaker-io.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-dev-datasaker-io.name +} + +output "region" { + value = "ap-northeast-2" +} + +output "subnet_ap-northeast-2a_id" { + value = "subnet-0c875e254456809f7" +} + +output "subnet_ap-northeast-2b_id" { + value = "subnet-05672a669943fc12f" +} + +output "subnet_ap-northeast-2c_id" { + value = "subnet-0940fd78504acbbde" +} + +output "subnet_ids" { + value = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0a5d787353f874684", "subnet-0c875e254456809f7", "subnet-0de55619bee2411f8", "subnet-0ee26ffc561efb292"] +} + +output "subnet_utility-ap-northeast-2a_id" { + value = "subnet-0de55619bee2411f8" +} + +output "subnet_utility-ap-northeast-2b_id" { + value = "subnet-0a5d787353f874684" +} + +output "subnet_utility-ap-northeast-2c_id" { + value = "subnet-0ee26ffc561efb292" +} + +output "vpc_id" { + value = "vpc-0b6e0b906c678a22f" +} + +provider "aws" { + region = "ap-northeast-2" +} + +provider "aws" { + alias = "files" + region = "ap-northeast-2" +} + +resource "aws_autoscaling_group" "dev-data-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-a-dev-datasaker-io.id + version = aws_launch_template.dev-data-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-data-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = 
aws_launch_template.dev-data-b-dev-datasaker-io.id + version = aws_launch_template.dev-data-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "dev-data-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-c-dev-datasaker-io.id + version = aws_launch_template.dev-data-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-c.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0940fd78504acbbde"] +} + +resource "aws_autoscaling_group" "dev-mgmt-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-mgmt-a-dev-datasaker-io.id + version = aws_launch_template.dev-mgmt-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + 
metrics_granularity = "1Minute" + min_size = 1 + name = "dev-mgmt-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-mgmt-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "mgmt" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-mgmt-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-mgmt-b-dev-datasaker-io.id + version = aws_launch_template.dev-mgmt-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-mgmt-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-mgmt-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "mgmt" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "dev-process-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-a-dev-datasaker-io.id + version = aws_launch_template.dev-process-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = 
true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-process-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-b-dev-datasaker-io.id + version = aws_launch_template.dev-process-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-process-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-c-dev-datasaker-io.id + version = aws_launch_template.dev-process-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-c.dev.datasaker.io" + } + tag 
{ + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2a-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2a-masters-dev-datasaker-io.id + version = aws_launch_template.master-ap-northeast-2a-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2a.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2a.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2b-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2b-masters-dev-datasaker-io.id + version = 
aws_launch_template.master-ap-northeast-2b-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2b.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2b.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2c-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2c-masters-dev-datasaker-io.id + version = aws_launch_template.master-ap-northeast-2c-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2c.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2c.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0940fd78504acbbde"] +} + +resource "aws_ebs_volume" "a-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2a" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "a.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "a-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2a" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "a.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "b-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2b" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "b.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "b-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2b" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "b.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "c-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2c" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "c.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "c-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2c" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "c.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_elb" "api-dev-datasaker-io" { + connection_draining = true + connection_draining_timeout = 300 + cross_zone_load_balancing = false + health_check { + healthy_threshold = 2 + interval = 10 + target = "SSL:443" + timeout = 5 + unhealthy_threshold = 2 + } + idle_timeout = 300 + listener { + instance_port = 443 + instance_protocol = "TCP" + lb_port = 443 + lb_protocol = "TCP" + } + name = "api-dev-datasaker-io-ru2qna" + security_groups = [aws_security_group.api-elb-dev-datasaker-io.id] + subnets = ["subnet-0a5d787353f874684", "subnet-0de55619bee2411f8", "subnet-0ee26ffc561efb292"] + tags = { + 
"KubernetesCluster" = "dev.datasaker.io" + "Name" = "api.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_instance_profile" "masters-dev-datasaker-io" { + name = "masters.dev.datasaker.io" + role = aws_iam_role.masters-dev-datasaker-io.name + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-dev-datasaker-io" { + name = "nodes.dev.datasaker.io" + role = aws_iam_role.nodes-dev-datasaker-io.name + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role" "masters-dev-datasaker-io" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.dev.datasaker.io_policy") + name = "masters.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role" "nodes-dev-datasaker-io" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.dev.datasaker.io_policy") + name = "nodes.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-dev-datasaker-io" { + name = "masters.dev.datasaker.io" + policy = file("${path.module}/data/aws_iam_role_policy_masters.dev.datasaker.io_policy") + role = aws_iam_role.masters-dev-datasaker-io.name +} + +resource "aws_iam_role_policy" "nodes-dev-datasaker-io" { + name = "nodes.dev.datasaker.io" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy") + role = aws_iam_role.nodes-dev-datasaker-io.name +} + +resource "aws_key_pair" "kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6" { + key_name = "kubernetes.dev.datasaker.io-c8:01:5e:c8:c1:4f:2a:1b:71:6c:21:3a:5c:04:7b:d6" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key") + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_launch_template" "dev-data-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "m5.4xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "m5.4xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-b" + 
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "m5.4xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" 
= "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-mgmt-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-mgmt-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-mgmt-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = 
+  key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id
+  lifecycle {
+    create_before_destroy = true
+  }
+  metadata_options {
+    http_endpoint = "enabled"
+    http_protocol_ipv6 = "disabled"
+    http_put_response_hop_limit = 1
+    http_tokens = "optional"
+  }
+  monitoring {
+    enabled = false
+  }
+  name = "dev-mgmt-b.dev.datasaker.io"
+  network_interfaces {
+    associate_public_ip_address = false
+    delete_on_termination = true
+    ipv6_address_count = 0
+    security_groups = [aws_security_group.nodes-dev-datasaker-io.id]
+  }
+  tag_specifications {
+    resource_type = "instance"
+    tags = {
+      "KubernetesCluster" = "dev.datasaker.io"
+      "Name" = "dev-mgmt-b.dev.datasaker.io"
+      "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt"
+      "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b"
+      "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
+      "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
+      "k8s.io/role/node" = "1"
+      "kops.k8s.io/instancegroup" = "dev-mgmt-b"
+      "kubernetes.io/cluster/dev.datasaker.io" = "owned"
+    }
+  }
+  tag_specifications {
+    resource_type = "volume"
+    tags = {
+      "KubernetesCluster" = "dev.datasaker.io"
+      "Name" = "dev-mgmt-b.dev.datasaker.io"
+      "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt"
+      "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b"
+      "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
+      "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
+      "k8s.io/role/node" = "1"
+      "kops.k8s.io/instancegroup" = "dev-mgmt-b"
+      "kubernetes.io/cluster/dev.datasaker.io" = "owned"
+    }
+  }
+  tags = {
+    "KubernetesCluster" = "dev.datasaker.io"
+    "Name" = "dev-mgmt-b.dev.datasaker.io"
+    "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt"
+    "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b"
+    "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
+    "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
+    "k8s.io/role/node" = "1"
+    "kops.k8s.io/instancegroup" = "dev-mgmt-b"
+    "kubernetes.io/cluster/dev.datasaker.io" = "owned"
+  }
+  user_data = filebase64("${path.module}/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data")
+}
+
+resource "aws_launch_template" "dev-process-a-dev-datasaker-io" {
+  block_device_mappings {
+    device_name = "/dev/sda1"
+    ebs {
+      delete_on_termination = true
+      encrypted = true
+      iops = 3000
+      throughput = 125
+      volume_size = 128
+      volume_type = "gp3"
+    }
+  }
+  iam_instance_profile {
+    name = aws_iam_instance_profile.nodes-dev-datasaker-io.id
+  }
+  image_id = "ami-0ea5eb4b05645aa8a"
+  instance_type = "c5.xlarge"
+  key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id
+  lifecycle {
+    create_before_destroy = true
+  }
+  metadata_options {
+    http_endpoint = "enabled"
+    http_protocol_ipv6 = "disabled"
+    http_put_response_hop_limit = 1
+    http_tokens = "optional"
+  }
+  monitoring {
+    enabled = false
+  }
+  name = "dev-process-a.dev.datasaker.io"
+  network_interfaces {
+    associate_public_ip_address = false
+    delete_on_termination = true
+    ipv6_address_count = 0
+    security_groups = [aws_security_group.nodes-dev-datasaker-io.id]
+  }
+  tag_specifications {
+    resource_type = "instance"
+    tags = {
+      "KubernetesCluster" = "dev.datasaker.io"
+      "Name" = "dev-process-a.dev.datasaker.io"
+      "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process"
+      "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a"
+      "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
+      "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
+      "k8s.io/role/node" = "1"
+      "kops.k8s.io/instancegroup" = "dev-process-a"
+      "kubernetes.io/cluster/dev.datasaker.io" = "owned"
+    }
+  }
+  tag_specifications {
+    resource_type = "volume"
+    tags = {
+      "KubernetesCluster" = "dev.datasaker.io"
+      "Name" = "dev-process-a.dev.datasaker.io"
+      "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process"
+      "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a"
+      "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
+      "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
+      "k8s.io/role/node" = "1"
+      "kops.k8s.io/instancegroup" = "dev-process-a"
+      "kubernetes.io/cluster/dev.datasaker.io" = "owned"
+    }
+  }
+  tags = {
+    "KubernetesCluster" = "dev.datasaker.io"
+    "Name" = "dev-process-a.dev.datasaker.io"
+    "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process"
+    "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a"
+    "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
+    "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
+    "k8s.io/role/node" = "1"
+    "kops.k8s.io/instancegroup" = "dev-process-a"
+    "kubernetes.io/cluster/dev.datasaker.io" = "owned"
+  }
+  user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data")
+}
+
+resource "aws_launch_template" "dev-process-b-dev-datasaker-io" {
+  block_device_mappings {
+    device_name = "/dev/sda1"
+    ebs {
+      delete_on_termination = true
+      encrypted = true
+      iops = 3000
+      throughput = 125
+      volume_size = 128
+      volume_type = "gp3"
+    }
+  }
+  iam_instance_profile {
+    name = aws_iam_instance_profile.nodes-dev-datasaker-io.id
+  }
+  image_id = "ami-0ea5eb4b05645aa8a"
+  instance_type = "c5.xlarge"
+  key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id
+  lifecycle {
+    create_before_destroy = true
+  }
+  metadata_options {
+    http_endpoint = "enabled"
+    http_protocol_ipv6 = "disabled"
+    http_put_response_hop_limit = 1
+    http_tokens = "optional"
+  }
+  monitoring {
+    enabled = false
+  }
+  name = "dev-process-b.dev.datasaker.io"
+  network_interfaces {
+    associate_public_ip_address = false
+    delete_on_termination = true
+    ipv6_address_count = 0
+    security_groups = [aws_security_group.nodes-dev-datasaker-io.id]
+  }
+  tag_specifications {
+    resource_type = "instance"
+    tags = {
+      "KubernetesCluster" = "dev.datasaker.io"
+      "Name" = "dev-process-b.dev.datasaker.io"
+      "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process"
+      "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b"
+      "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
+      "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
+      "k8s.io/role/node" = "1"
+      "kops.k8s.io/instancegroup" = "dev-process-b"
+      "kubernetes.io/cluster/dev.datasaker.io" = "owned"
+    }
+  }
+  tag_specifications {
+    resource_type = "volume"
+    tags = {
+      "KubernetesCluster" = "dev.datasaker.io"
+      "Name" = "dev-process-b.dev.datasaker.io"
"k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-process-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "c5.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-process-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2a-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "t3.small" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2a.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" 
= "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2b-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "t3.small" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2b.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2c-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0ea5eb4b05645aa8a" + instance_type = "t3.small" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2c.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + 
"kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data") +} + +resource "aws_route53_record" "api-dev-datasaker-io" { + alias { + evaluate_target_health = false + name = aws_elb.api-dev-datasaker-io.dns_name + zone_id = aws_elb.api-dev-datasaker-io.zone_id + } + name = "api.dev.datasaker.io" + type = "A" + zone_id = "/hostedzone/Z072735718G25WNVKU834" +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "dev.datasaker.io/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-aws-ebs-csi-driver-addons-k8s-io-k8s-1-17" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content") + key = "dev.datasaker.io/addons/aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-bootstrap" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content") + key = "dev.datasaker.io/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "dev.datasaker.io/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "dev.datasaker.io/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "dev.datasaker.io/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" 
"dev-datasaker-io-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "dev.datasaker.io/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-leader-migration-rbac-addons-k8s-io-k8s-1-23" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content") + key = "dev.datasaker.io/addons/leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-limit-range-addons-k8s-io" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content") + key = "dev.datasaker.io/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-networking-projectcalico-org-k8s-1-22" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.22_content") + key = "dev.datasaker.io/addons/networking.projectcalico.org/k8s-1.22.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "dev.datasaker.io/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "dev.datasaker.io/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "dev.datasaker.io/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "dev.datasaker.io/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events_content") + key = "dev.datasaker.io/manifests/etcd/events.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main_content") + key = "dev.datasaker.io/manifests/etcd/main.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = 
"clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "dev.datasaker.io/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-a_content") + key = "dev.datasaker.io/igconfig/node/dev-data-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-b_content") + key = "dev.datasaker.io/igconfig/node/dev-data-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-c_content") + key = "dev.datasaker.io/igconfig/node/dev-data-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-mgmt-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content") + key = "dev.datasaker.io/igconfig/node/dev-mgmt-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-mgmt-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content") + key = "dev.datasaker.io/igconfig/node/dev-mgmt-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-a_content") + key = "dev.datasaker.io/igconfig/node/dev-process-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-b_content") + key = "dev.datasaker.io/igconfig/node/dev-process-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-c_content") + key = "dev.datasaker.io/igconfig/node/dev-process-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-ap-northeast-2a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-ap-northeast-2b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" 
"nodeupconfig-master-ap-northeast-2c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "api-elb-dev-datasaker-io" { + description = "Security group for api ELB" + name = "api-elb.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "api-elb.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-0b6e0b906c678a22f" +} + +resource "aws_security_group" "masters-dev-datasaker-io" { + description = "Security group for masters" + name = "masters.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-0b6e0b906c678a22f" +} + +resource "aws_security_group" "nodes-dev-datasaker-io" { + description = "Security group for nodes" + name = "nodes.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-0b6e0b906c678a22f" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-api-elb-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-masters-dev-datasaker-io" { + from_port = 22 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-nodes-dev-datasaker-io" { + from_port = 22 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-443to443-api-elb-dev-datasaker-io" { + from_port = 443 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-api-elb-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-api-elb-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" 
"from-masters-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-ingress-all-0to0-masters-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-ingress-all-0to0-nodes-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + source_security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-4-0to0-masters-dev-datasaker-io" { + from_port = 0 + protocol = "4" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-all-0to0-nodes-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-1to2379-masters-dev-datasaker-io" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-2382to4000-masters-dev-datasaker-io" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-4003to65535-masters-dev-datasaker-io" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-udp-1to65535-masters-dev-datasaker-io" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + 
source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "https-elb-to-master" { + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmpv6-pmtu-api-elb-__--0" { + from_port = -1 + ipv6_cidr_blocks = ["::/0"] + protocol = "icmpv6" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = -1 + type = "ingress" +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +}