havelight-ee
2023-05-11 13:55:28 +09:00
parent 55d4828037
commit 2d70373907
1390 changed files with 0 additions and 1398 deletions

View File

@@ -0,0 +1,22 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
version = "4.27.0"
constraints = ">= 4.0.0"
hashes = [
"h1:w3j7YomUQ9IfRp3MUuY0+hFX1T1cawZoj0Xsc1a46bU=",
"zh:0f5ade3801fec487641e4f7d81e28075b716c787772f9709cc2378d20f325791",
"zh:19ffa83be6b6765a4f821a17b8d260dd0f192a6c40765fa53ac65fd042cb1f65",
"zh:3ac89d33ff8ca75bdc42f31c63ce0018ffc66aa69917c18713e824e381950e4e",
"zh:81a199724e74992c8a029a968d211cb45277d95a2e88d0f07ec85127b6c6849b",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
"zh:a2e2c851a37ef97bbccccd2e686b4d016abe207a7f56bff70b10bfdf8ed1cbfd",
"zh:baf844def338d77f8a3106b1411a1fe22e93a82e3dc51e5d33b766f741c4a6a3",
"zh:bc33137fae808f91da0a9de7031cbea77d0ee4eefb4d2ad6ab7f58cc2111a7ff",
"zh:c960ae2b33c8d3327f67a3db5ce1952315146d69dfc3f1b0922242e2b218eec8",
"zh:f3ea1a25797c79c035463a1188a6a42e131f391f3cb714975ce49ccd301cda07",
"zh:f7e77c871d38236e5fedee0086ff77ff396e88964348c794cf38e578fcc00293",
"zh:fb338d5dfafab907b8608bd66cad8ca9ae4679f8c62c2435c2056a38b719baa2",
]
}
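A note on maintenance, since this lock file pins hashicorp/aws to 4.27.0 under the ">= 4.0.0" constraint: as the header says, it is meant to be regenerated by the CLI rather than edited. A minimal sketch of that workflow, assuming the standard Terraform CLI (the platform list below is illustrative):

# Re-resolve "version" within the recorded constraint and rewrite this file:
#   terraform init -upgrade
# Optionally pre-record hashes for additional platforms so CI on other
# operating systems can verify the same provider build:
#   terraform providers lock -platform=linux_amd64 -platform=darwin_arm64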

View File

@@ -0,0 +1,133 @@
resource "aws_route_table" "rt-datasaker-dev" {
tags = {
"Name" = "rt-datasaker-dev"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route" "route-private-rt-datasaker-dev-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.natgw-datasaker.id
route_table_id = aws_route_table.rt-datasaker-dev.id
}
resource "aws_subnet" "sbn-dev-a" {
availability_zone = "ap-northeast-2a"
cidr_block = "172.21.8.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dev-a.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dev-b" {
availability_zone = "ap-northeast-2b"
cidr_block = "172.21.10.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dev-b.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dev-c" {
availability_zone = "ap-northeast-2c"
cidr_block = "172.21.12.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dev-c.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route_table_association" "rta-dev-a" {
route_table_id = aws_route_table.rt-datasaker-dev.id
subnet_id = aws_subnet.sbn-dev-a.id
}
resource "aws_route_table_association" "rta-dev-b" {
route_table_id = aws_route_table.rt-datasaker-dev.id
subnet_id = aws_subnet.sbn-dev-b.id
}
resource "aws_route_table_association" "rta-dev-c" {
route_table_id = aws_route_table.rt-datasaker-dev.id
subnet_id = aws_subnet.sbn-dev-c.id
}
resource "aws_security_group" "sg-dev-datasaker" {
description = "Security group dev-datasaker"
name = "secg-dev-datasaker"
tags = {
"Name" = "sg-dev-datasaker"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 8
protocol = "icmp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 8
type = "ingress"
}
resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 8
protocol = "icmp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 8
type = "egress"
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "tcp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 65535
type = "egress"
}

View File

@@ -0,0 +1,252 @@
output "sbn_dmz_a_id" {
value = aws_subnet.sbn-dmz-a.id
}
output "sbn_dmz_b_id" {
value = aws_subnet.sbn-dmz-b.id
}
output "sbn_dmz_c_id" {
value = aws_subnet.sbn-dmz-c.id
}
resource "aws_subnet" "sbn-dmz-a" {
availability_zone = "ap-northeast-2a"
cidr_block = "172.21.0.0/24"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dmz-a.datasaker"
"SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dmz-b" {
availability_zone = "ap-northeast-2b"
cidr_block = "172.21.1.0/24"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dmz-b.datasaker"
"SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dmz-c" {
availability_zone = "ap-northeast-2c"
cidr_block = "172.21.2.0/24"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dmz-c.datasaker"
"SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route_table_association" "rta-dmz-a" {
route_table_id = aws_route_table.rt-datasaker-pub.id
subnet_id = aws_subnet.sbn-dmz-a.id
}
resource "aws_route_table_association" "rta-dmz-b" {
route_table_id = aws_route_table.rt-datasaker-pub.id
subnet_id = aws_subnet.sbn-dmz-b.id
}
resource "aws_route_table_association" "rta-dmz-c" {
route_table_id = aws_route_table.rt-datasaker-pub.id
subnet_id = aws_subnet.sbn-dmz-c.id
}
resource "aws_security_group" "sg-dmz-datasaker" {
description = "Security group dmz-datasaker"
name = "secg-dmz-datasaker"
tags = {
"Name" = "sg-dmz-datasaker"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
# resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dmz-datasaker-io" {
# cidr_blocks = ["0.0.0.0/0"]
# from_port = 22
# protocol = "tcp"
# security_group_id = aws_security_group.sg-dmz-datasaker.id
# to_port = 22
# type = "ingress"
# }
resource "aws_security_group_rule" "sgr-from-115-178-73-2--32-ingress-tcp-22to22-dmz-datasaker-io" {
cidr_blocks = ["115.178.73.2/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.sg-dmz-datasaker.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "sgr-from-115-178-73-91--32-ingress-tcp-22to22-dmz-datasaker-io" {
cidr_blocks = ["115.178.73.91/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.sg-dmz-datasaker.id
to_port = 22
type = "ingress"
}
# resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-dmz-datasaker-io" {
# cidr_blocks = ["0.0.0.0/0"]
# from_port = 8
# protocol = "icmp"
# security_group_id = aws_security_group.sg-dmz-datasaker.id
# to_port = 8
# type = "ingress"
# }
# resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-dmz-datasaker-io" {
# cidr_blocks = ["0.0.0.0/0"]
# from_port = 8
# protocol = "icmp"
# security_group_id = aws_security_group.sg-dmz-datasaker.id
# to_port = 8
# type = "egress"
# }
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dmz-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "tcp"
security_group_id = aws_security_group.sg-dmz-datasaker.id
to_port = 65535
type = "egress"
}
resource "aws_launch_template" "lt-dmz-bastion-datasaker" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 20
volume_type = "gp3"
}
}
image_id = "ami-0ea5eb4b05645aa8a"
instance_type = "t3.small"
key_name = aws_key_pair.kp-bastion-datasaker.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_protocol_ipv6 = "disabled"
http_put_response_hop_limit = 3
http_tokens = "required"
}
monitoring {
enabled = false
}
name = "lt-dmz-bastion-datasaker"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.sg-dmz-datasaker.id]
}
# tag_specifications {
# resource_type = "instance"
# tags = {
# "Name" = "lt-dmz-bastion-datasaker"
# }
# }
# tag_specifications {
# resource_type = "volume"
# tags = {
# "Name" = "master-ap-northeast-2b.masters.ap-northeast-2.dev.datasaker.io"
# }
# }
tags = {
"Name" = "lt-dmz-bastion-datasaker"
}
# user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2b.masters.ap-northeast-2.dev.datasaker.io_user_data")
}
resource "aws_autoscaling_group" "ag-dmz-bastion-datasaker" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.lt-dmz-bastion-datasaker.id
version = aws_launch_template.lt-dmz-bastion-datasaker.latest_version
}
max_instance_lifetime = 0
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "ag-dmz-bastion-datasaker"
protect_from_scale_in = false
tag {
key = "Name"
propagate_at_launch = true
value = "ag-dmz-bastion-datasaker"
}
vpc_zone_identifier = [aws_subnet.sbn-dmz-a.id, aws_subnet.sbn-dmz-b.id]
}
resource "aws_eip" "eip-natgw-datasaker" {
# instance = aws_instance.web1-ec2.id
vpc = true
tags = {
Name = "eip-natgw-datasaker"
}
}
resource "aws_nat_gateway" "natgw-datasaker" {
allocation_id = aws_eip.eip-natgw-datasaker.id
subnet_id = aws_subnet.sbn-dmz-a.id
tags = {
Name = "natgw-datasaker"
}
# To ensure proper ordering, it is recommended to add an explicit dependency
# on the Internet Gateway for the VPC.
depends_on = [aws_internet_gateway.igw-datasaker]
}

View File

@@ -0,0 +1,126 @@
resource "aws_route_table" "rt-datasaker-iac" {
tags = {
"Name" = "rt-datasaker-iac"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route" "route-private-rt-datasaker-iac-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.natgw-datasaker.id
route_table_id = aws_route_table.rt-datasaker-iac.id
}
resource "aws_subnet" "sbn-iac-a" {
availability_zone = "ap-northeast-2a"
cidr_block = "172.21.16.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-iac-a.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-iac-b" {
availability_zone = "ap-northeast-2b"
cidr_block = "172.21.18.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-iac-b.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-iac-c" {
availability_zone = "ap-northeast-2c"
cidr_block = "172.21.20.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-iac-c.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route_table_association" "rta-iac-a" {
route_table_id = aws_route_table.rt-datasaker-iac.id
subnet_id = aws_subnet.sbn-iac-a.id
}
resource "aws_route_table_association" "rta-iac-b" {
route_table_id = aws_route_table.rt-datasaker-iac.id
subnet_id = aws_subnet.sbn-iac-b.id
}
resource "aws_route_table_association" "rta-iac-c" {
route_table_id = aws_route_table.rt-datasaker-iac.id
subnet_id = aws_subnet.sbn-iac-c.id
}
resource "aws_security_group" "sg-iac-datasaker" {
description = "Security group iac-datasaker"
name = "secg-iac-datasaker"
tags = {
"Name" = "sg-iac-datasaker"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-iac-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.sg-iac-datasaker.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-iac-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 8
protocol = "icmp"
security_group_id = aws_security_group.sg-iac-datasaker.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-iac-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 8
protocol = "icmp"
security_group_id = aws_security_group.sg-iac-datasaker.id
to_port = 8
type = "egress"
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-iac-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "tcp"
security_group_id = aws_security_group.sg-iac-datasaker.id
to_port = 65535
type = "egress"
}
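One reading note for the ICMP rules above: when protocol = "icmp", the AWS provider repurposes from_port as the ICMP type and to_port as the ICMP code, so these values are not port ranges. Under that reading, from_port = 8 with to_port = 0 describes echo request (ping) with code 0. A small sketch with the semantics spelled out, the resource name being hypothetical:

resource "aws_security_group_rule" "sgr-example-ping-ingress" { # hypothetical name
  type              = "ingress"
  protocol          = "icmp"
  from_port         = 8 # ICMP type 8: echo request
  to_port           = 0 # ICMP code 0
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.sg-iac-datasaker.id
}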

File diff suppressed because it is too large

View File

@@ -0,0 +1,8 @@
{
"version": 4,
"terraform_version": "1.1.9",
"serial": 1058,
"lineage": "0d7102e1-4b04-a7c0-069c-c81a4ba42c0d",
"outputs": {},
"resources": []
}

View File

@@ -0,0 +1,93 @@
terraform {
required_version = ">= 0.15.0"
required_providers {
aws = {
"configuration_aliases" = [aws.files]
"source" = "hashicorp/aws"
"version" = ">= 4.0.0"
}
}
}
provider "aws" {
region = "ap-northeast-2"
}
provider "aws" {
alias = "files"
region = "ap-northeast-2"
}
output "vpc_datasaker_id" {
value = aws_vpc.vpc-datasaker.id
}
output "vpc_datasaker_cidr_block" {
value = aws_vpc.vpc-datasaker.cidr_block
}
resource "aws_vpc" "vpc-datasaker" {
assign_generated_ipv6_cidr_block = true
cidr_block = "172.21.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"Name" = "vpc-datasaker"
}
}
resource "aws_vpc_dhcp_options" "vpc-dhcp-datasaker" {
domain_name = "ap-northeast-2.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"Name" = "vpc-dhcp-datasaker"
}
}
resource "aws_vpc_dhcp_options_association" "vpc-dhcp-asso-datasaker" {
dhcp_options_id = aws_vpc_dhcp_options.vpc-dhcp-datasaker.id
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_internet_gateway" "igw-datasaker" {
tags = {
"Name" = "igw-datasaker"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_key_pair" "kp-bastion-datasaker" {
key_name = "kp-bastion-datasaker"
public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDv9Bk/20f0xHLQN1Mnub0VwsbRw7ggubeUZ+pUVaX9BD7uUud/ITktmTArbabLJLGgWx64la6+6VuQHauzX/cpMp4dVxoaySQDGPsB+V0WnXaq0pWop5BoJaPO75lpk/Kp7NFtn9x3315Rqmis1Df1UrQehMkqunnr2jWkil6iueAckztpsnqxlb8S+uVYiM7C4HsVx8XdOT3WtfUv+hzDlejy11nzi5T4HMT70O107N4g5CrEapluc7M3NfxCFhz5Gxu8P0dfJKLs9fFT4E8DRfGly5/cDcKbiJHSAZYRN6UwKr3z7LAw8aIW8JWflXn1fMZ92qdiT04kN8ZdVzyMpUiWMXJQPrfI2EHT/OHAympzKrXnT98oIqJANE4Eq72OG9Hrb6Tauk8Bde5/v3P9d7m5Zi9tx+01PZ1JQR+1dkJeV3Am6mjKWrxIowKPol2chnARoU7y1rEZGGi+09bD5hUq7KW6z61DUIlCMYF0Oq0IMs/voQP8zqpDmvSPNJc= hsgahm@ws-ubuntu"
tags = {
"Name" = "kp-bastion-datasaker"
}
}
resource "aws_route_table" "rt-datasaker-pub" {
tags = {
"Name" = "rt-datasaker-pub"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route" "r-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw-datasaker.id
route_table_id = aws_route_table.rt-datasaker-pub.id
}
resource "aws_route" "r-__--0" {
destination_ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.igw-datasaker.id
route_table_id = aws_route_table.rt-datasaker-pub.id
}
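About the configuration_aliases entry near the top of this file: it declares that, when this configuration is consumed as a child module, the caller must pass in an aliased provider named aws.files alongside the default aws provider (here the root configuration also defines aws.files directly, so both paths resolve). A minimal sketch of a caller satisfying that contract, with the module name and source path hypothetical:

provider "aws" {
  alias  = "files"
  region = "ap-northeast-2"
}

module "network" { # hypothetical caller
  source = "./network" # hypothetical path to this configuration
  providers = {
    aws.files = aws.files
  }
}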

View File

@@ -0,0 +1,22 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
version = "4.27.0"
constraints = ">= 4.0.0"
hashes = [
"h1:w3j7YomUQ9IfRp3MUuY0+hFX1T1cawZoj0Xsc1a46bU=",
"zh:0f5ade3801fec487641e4f7d81e28075b716c787772f9709cc2378d20f325791",
"zh:19ffa83be6b6765a4f821a17b8d260dd0f192a6c40765fa53ac65fd042cb1f65",
"zh:3ac89d33ff8ca75bdc42f31c63ce0018ffc66aa69917c18713e824e381950e4e",
"zh:81a199724e74992c8a029a968d211cb45277d95a2e88d0f07ec85127b6c6849b",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
"zh:a2e2c851a37ef97bbccccd2e686b4d016abe207a7f56bff70b10bfdf8ed1cbfd",
"zh:baf844def338d77f8a3106b1411a1fe22e93a82e3dc51e5d33b766f741c4a6a3",
"zh:bc33137fae808f91da0a9de7031cbea77d0ee4eefb4d2ad6ab7f58cc2111a7ff",
"zh:c960ae2b33c8d3327f67a3db5ce1952315146d69dfc3f1b0922242e2b218eec8",
"zh:f3ea1a25797c79c035463a1188a6a42e131f391f3cb714975ce49ccd301cda07",
"zh:f7e77c871d38236e5fedee0086ff77ff396e88964348c794cf38e578fcc00293",
"zh:fb338d5dfafab907b8608bd66cad8ca9ae4679f8c62c2435c2056a38b719baa2",
]
}

View File

@@ -0,0 +1,768 @@
aws_route_table_association.rta-iac-c: Refreshing state... [id=rtbassoc-0fd9ea480fc1bd2ec]
aws_route_table_association.rta-iac-a: Refreshing state... [id=rtbassoc-0d9f0c70e1d159ede]
aws_route_table_association.rta-iac-b: Refreshing state... [id=rtbassoc-04f3b256e59854ac7]
aws_security_group_rule.sgr-from-0-0-0-0--0-engress-tcp-all-iac-datasaker-io: Refreshing state... [id=sgrule-3927496604]
aws_route.route-private-rt-datasaker-iac-0-0-0-0--0: Refreshing state... [id=r-rtb-02fa87e2873f596311080289494]
aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-icmp-iac-datasaker-io: Refreshing state... [id=sgrule-3238310385]
aws_security_group.sg-iac-datasaker: Refreshing state... [id=sg-07d769a675ed05b35]
aws_security_group_rule.sgr-to-0-0-0-0--0-egress-icmp-iac-datasaker-io: Refreshing state... [id=sgrule-361293936]
aws_subnet.sbn-iac-a: Refreshing state... [id=subnet-08330ae1fd7c5d77e]
aws_subnet.sbn-iac-b: Refreshing state... [id=subnet-0c8c4d1df1a2920e4]
aws_subnet.sbn-iac-c: Refreshing state... [id=subnet-06e724baf7d879769]
aws_route_table.rt-datasaker-iac: Refreshing state... [id=rtb-02fa87e2873f59631]
aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-tcp-22to22-iac-datasaker-io: Refreshing state... [id=sgrule-3300624291]
aws_vpc_dhcp_options.vpc-dhcp-datasaker: Refreshing state... [id=dopt-086d99ae90c3cde26]
aws_key_pair.kp-bastion-datasaker: Refreshing state... [id=kp-bastion-datasaker]
aws_eip.eip-natgw-datasaker: Refreshing state... [id=eipalloc-08b46670f70c2d11d]
aws_vpc.vpc-datasaker: Refreshing state... [id=vpc-0b6e0b906c678a22f]
aws_security_group.sg-dev-datasaker: Refreshing state... [id=sg-0aed067b37f609a6f]
aws_vpc_dhcp_options_association.vpc-dhcp-asso-datasaker: Refreshing state... [id=dopt-086d99ae90c3cde26-vpc-0b6e0b906c678a22f]
aws_route_table.rt-datasaker-pub: Refreshing state... [id=rtb-057ad7940bd4d0e47]
aws_subnet.sbn-dev-a: Refreshing state... [id=subnet-0c875e254456809f7]
aws_security_group.sg-dmz-datasaker: Refreshing state... [id=sg-07f27eba164d59dfa]
aws_internet_gateway.igw-datasaker: Refreshing state... [id=igw-024cfe034db889aee]
aws_route_table.rt-datasaker-dev: Refreshing state... [id=rtb-0222c34fe74835820]
aws_subnet.sbn-dev-c: Refreshing state... [id=subnet-0940fd78504acbbde]
aws_subnet.sbn-dev-b: Refreshing state... [id=subnet-05672a669943fc12f]
aws_subnet.sbn-dmz-a: Refreshing state... [id=subnet-0de55619bee2411f8]
aws_subnet.sbn-dmz-b: Refreshing state... [id=subnet-0a5d787353f874684]
aws_subnet.sbn-dmz-c: Refreshing state... [id=subnet-0ee26ffc561efb292]
aws_security_group_rule.sgr-from-0-0-0-0--0-engress-tcp-all-dev-datasaker-io: Refreshing state... [id=sgrule-3441164403]
aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-tcp-22to22-dev-datasaker-io: Refreshing state... [id=sgrule-4096359581]
aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-icmp-dev-datasaker-io: Refreshing state... [id=sgrule-4111863151]
aws_security_group_rule.sgr-to-0-0-0-0--0-egress-icmp-dev-datasaker-io: Refreshing state... [id=sgrule-320777473]
aws_route.r-0-0-0-0--0: Refreshing state... [id=r-rtb-057ad7940bd4d0e471080289494]
aws_route.r-__--0: Refreshing state... [id=r-rtb-057ad7940bd4d0e472750132062]
aws_route_table_association.rta-dmz-a: Refreshing state... [id=rtbassoc-02b2175a50034917e]
aws_nat_gateway.natgw-datasaker: Refreshing state... [id=nat-0149e41fa11377dfd]
aws_route_table_association.rta-dev-a: Refreshing state... [id=rtbassoc-0459be806f7412cc4]
aws_route_table_association.rta-dev-c: Refreshing state... [id=rtbassoc-02bd4922ff04a92c9]
aws_security_group_rule.sgr-from-115-178-73-91--32-ingress-tcp-22to22-dmz-datasaker-io: Refreshing state... [id=sgrule-807868327]
aws_security_group_rule.sgr-from-115-178-73-2--32-ingress-tcp-22to22-dmz-datasaker-io: Refreshing state... [id=sgrule-3686484473]
aws_security_group_rule.sgr-from-0-0-0-0--0-engress-tcp-all-dmz-datasaker-io: Refreshing state... [id=sgrule-3688340000]
aws_route_table_association.rta-dev-b: Refreshing state... [id=rtbassoc-099a4bf3c51f94b59]
aws_launch_template.lt-dmz-bastion-datasaker: Refreshing state... [id=lt-0a499abed36cc72a3]
aws_route_table_association.rta-dmz-b: Refreshing state... [id=rtbassoc-094e0157c4a065f34]
aws_route_table_association.rta-dmz-c: Refreshing state... [id=rtbassoc-0989ba31270eaee71]
aws_route.route-private-rt-datasaker-dev-0-0-0-0--0: Refreshing state... [id=r-rtb-0222c34fe748358201080289494]
aws_autoscaling_group.ag-dmz-bastion-datasaker: Refreshing state... [id=ag-dmz-bastion-datasaker]

Note: Objects have changed outside of Terraform
Terraform detected the following changes made outside of Terraform since the
last "terraform apply":
 # aws_autoscaling_group.ag-dmz-bastion-datasaker has changed
 ~ resource "aws_autoscaling_group" "ag-dmz-bastion-datasaker" {
id = "ag-dmz-bastion-datasaker"
+ load_balancers = []
name = "ag-dmz-bastion-datasaker"
+ suspended_processes = []
+ target_group_arns = []
+ termination_policies = []
# (19 unchanged attributes hidden)
# (2 unchanged blocks hidden)
}
 # aws_eip.eip-natgw-datasaker has changed
 ~ resource "aws_eip" "eip-natgw-datasaker" {
+ association_id = "eipassoc-0a0634f3c2394e898"
id = "eipalloc-08b46670f70c2d11d"
+ network_interface = "eni-013cb7e830637be38"
+ private_dns = "ip-172-21-0-244.ap-northeast-2.compute.internal"
+ private_ip = "172.21.0.244"
tags = {
"Name" = "eip-natgw-datasaker"
}
# (8 unchanged attributes hidden)
}
 # aws_launch_template.lt-dmz-bastion-datasaker has changed
 ~ resource "aws_launch_template" "lt-dmz-bastion-datasaker" {
id = "lt-0a499abed36cc72a3"
name = "lt-dmz-bastion-datasaker"
+ security_group_names = []
tags = {
"Name" = "lt-dmz-bastion-datasaker"
}
+ vpc_security_group_ids = []
# (9 unchanged attributes hidden)
~ network_interfaces {
+ ipv4_addresses = []
+ ipv4_prefixes = []
+ ipv6_addresses = []
+ ipv6_prefixes = []
# (9 unchanged attributes hidden)
}
# (3 unchanged blocks hidden)
}
 # aws_route.route-private-rt-datasaker-iac-0-0-0-0--0 has been deleted
 - resource "aws_route" "route-private-rt-datasaker-iac-0-0-0-0--0" {
- destination_cidr_block = "0.0.0.0/0" -> null
- id = "r-rtb-02fa87e2873f596311080289494" -> null
- nat_gateway_id = "nat-0149e41fa11377dfd" -> null
- origin = "CreateRoute" -> null
- route_table_id = "rtb-02fa87e2873f59631" -> null
- state = "active" -> null
}
 # aws_route_table.rt-datasaker-dev has changed
 ~ resource "aws_route_table" "rt-datasaker-dev" {
id = "rtb-0222c34fe74835820"
~ route = [
+ {
+ carrier_gateway_id = ""
+ cidr_block = "0.0.0.0/0"
+ core_network_arn = ""
+ destination_prefix_list_id = ""
+ egress_only_gateway_id = ""
+ gateway_id = ""
+ instance_id = ""
+ ipv6_cidr_block = ""
+ local_gateway_id = ""
+ nat_gateway_id = "nat-0149e41fa11377dfd"
+ network_interface_id = ""
+ transit_gateway_id = ""
+ vpc_endpoint_id = ""
+ vpc_peering_connection_id = ""
},
]
tags = {
"Name" = "rt-datasaker-dev"
}
# (5 unchanged attributes hidden)
}
 # aws_route_table.rt-datasaker-iac has been deleted
 - resource "aws_route_table" "rt-datasaker-iac" {
- arn = "arn:aws:ec2:ap-northeast-2:508259851457:route-table/rtb-02fa87e2873f59631" -> null
- id = "rtb-02fa87e2873f59631" -> null
- owner_id = "508259851457" -> null
- propagating_vgws = [] -> null
- route = [] -> null
- tags = {
- "Name" = "rt-datasaker-iac"
} -> null
- tags_all = {
- "Name" = "rt-datasaker-iac"
} -> null
- vpc_id = "vpc-0b6e0b906c678a22f" -> null
}
 # aws_route_table.rt-datasaker-pub has changed
 ~ resource "aws_route_table" "rt-datasaker-pub" {
id = "rtb-057ad7940bd4d0e47"
~ route = [
+ {
+ carrier_gateway_id = ""
+ cidr_block = ""
+ core_network_arn = ""
+ destination_prefix_list_id = ""
+ egress_only_gateway_id = ""
+ gateway_id = "igw-024cfe034db889aee"
+ instance_id = ""
+ ipv6_cidr_block = "::/0"
+ local_gateway_id = ""
+ nat_gateway_id = ""
+ network_interface_id = ""
+ transit_gateway_id = ""
+ vpc_endpoint_id = ""
+ vpc_peering_connection_id = ""
},
+ {
+ carrier_gateway_id = ""
+ cidr_block = "0.0.0.0/0"
+ core_network_arn = ""
+ destination_prefix_list_id = ""
+ egress_only_gateway_id = ""
+ gateway_id = "igw-024cfe034db889aee"
+ instance_id = ""
+ ipv6_cidr_block = ""
+ local_gateway_id = ""
+ nat_gateway_id = ""
+ network_interface_id = ""
+ transit_gateway_id = ""
+ vpc_endpoint_id = ""
+ vpc_peering_connection_id = ""
},
]
tags = {
"Name" = "rt-datasaker-pub"
}
# (5 unchanged attributes hidden)
}
 # aws_route_table_association.rta-iac-a has been deleted
 - resource "aws_route_table_association" "rta-iac-a" {
- id = "rtbassoc-0d9f0c70e1d159ede" -> null
- route_table_id = "rtb-02fa87e2873f59631" -> null
- subnet_id = "subnet-08330ae1fd7c5d77e" -> null
}
 # aws_route_table_association.rta-iac-b has been deleted
 - resource "aws_route_table_association" "rta-iac-b" {
- id = "rtbassoc-04f3b256e59854ac7" -> null
- route_table_id = "rtb-02fa87e2873f59631" -> null
- subnet_id = "subnet-0c8c4d1df1a2920e4" -> null
}
 # aws_route_table_association.rta-iac-c has been deleted
 - resource "aws_route_table_association" "rta-iac-c" {
- id = "rtbassoc-0fd9ea480fc1bd2ec" -> null
- route_table_id = "rtb-02fa87e2873f59631" -> null
- subnet_id = "subnet-06e724baf7d879769" -> null
}
 # aws_security_group.sg-dev-datasaker has changed
 ~ resource "aws_security_group" "sg-dev-datasaker" {
~ egress = [
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 0
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 65535
},
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 8
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "icmp"
+ security_groups = []
+ self = false
+ to_port = 8
},
]
id = "sg-0aed067b37f609a6f"
~ ingress = [
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 22
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 22
},
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 8
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "icmp"
+ security_groups = []
+ self = false
+ to_port = 8
},
]
name = "secg-dev-datasaker"
tags = {
"Name" = "sg-dev-datasaker"
}
# (6 unchanged attributes hidden)
}
 # aws_security_group.sg-dmz-datasaker has changed
 ~ resource "aws_security_group" "sg-dmz-datasaker" {
~ egress = [
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 0
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 65535
},
]
id = "sg-07f27eba164d59dfa"
~ ingress = [
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 30000
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 30000
},
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 443
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 443
},
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 80
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 80
},
+ {
+ cidr_blocks = [
+ "115.178.73.2/32",
+ "115.178.73.91/32",
]
+ description = ""
+ from_port = 22
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 22
},
]
name = "secg-dmz-datasaker"
tags = {
"Name" = "sg-dmz-datasaker"
}
# (6 unchanged attributes hidden)
}
 # aws_security_group.sg-iac-datasaker has been deleted
 - resource "aws_security_group" "sg-iac-datasaker" {
- arn = "arn:aws:ec2:ap-northeast-2:508259851457:security-group/sg-07d769a675ed05b35" -> null
- description = "Security group iac-datasaker" -> null
- egress = [] -> null
- id = "sg-07d769a675ed05b35" -> null
- ingress = [] -> null
- name = "secg-iac-datasaker" -> null
- owner_id = "508259851457" -> null
- revoke_rules_on_delete = false -> null
- tags = {
- "Name" = "sg-iac-datasaker"
} -> null
- tags_all = {
- "Name" = "sg-iac-datasaker"
} -> null
- vpc_id = "vpc-0b6e0b906c678a22f" -> null
}
 # aws_security_group_rule.sgr-from-0-0-0-0--0-engress-tcp-all-dev-datasaker-io has changed
 ~ resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dev-datasaker-io" {
id = "sgrule-3441164403"
# (7 unchanged attributes hidden)
}
 # aws_security_group_rule.sgr-from-0-0-0-0--0-engress-tcp-all-dmz-datasaker-io has changed
 ~ resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dmz-datasaker-io" {
id = "sgrule-3688340000"
# (7 unchanged attributes hidden)
}
 # aws_security_group_rule.sgr-from-0-0-0-0--0-engress-tcp-all-iac-datasaker-io has been deleted
 - resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-iac-datasaker-io" {
- cidr_blocks = [
- "0.0.0.0/0",
] -> null
- from_port = 0 -> null
- id = "sgrule-3927496604" -> null
- protocol = "tcp" -> null
- security_group_id = "sg-07d769a675ed05b35" -> null
- self = false -> null
- to_port = 65535 -> null
- type = "egress" -> null
}
 # aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-icmp-dev-datasaker-io has changed
 ~ resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-dev-datasaker-io" {
id = "sgrule-4111863151"
# (7 unchanged attributes hidden)
}
 # aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-icmp-iac-datasaker-io has been deleted
 - resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-iac-datasaker-io" {
- cidr_blocks = [
- "0.0.0.0/0",
] -> null
- from_port = 8 -> null
- id = "sgrule-3238310385" -> null
- protocol = "icmp" -> null
- security_group_id = "sg-07d769a675ed05b35" -> null
- self = false -> null
- to_port = 0 -> null
- type = "ingress" -> null
}
 # aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-tcp-22to22-dev-datasaker-io has changed
 ~ resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dev-datasaker-io" {
id = "sgrule-4096359581"
# (7 unchanged attributes hidden)
}
 # aws_security_group_rule.sgr-from-0-0-0-0--0-ingress-tcp-22to22-iac-datasaker-io has been deleted
 - resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-iac-datasaker-io" {
- cidr_blocks = [
- "0.0.0.0/0",
] -> null
- from_port = 22 -> null
- id = "sgrule-3300624291" -> null
- protocol = "tcp" -> null
- security_group_id = "sg-07d769a675ed05b35" -> null
- self = false -> null
- to_port = 22 -> null
- type = "ingress" -> null
}
 # aws_security_group_rule.sgr-from-115-178-73-2--32-ingress-tcp-22to22-dmz-datasaker-io has changed
 ~ resource "aws_security_group_rule" "sgr-from-115-178-73-2--32-ingress-tcp-22to22-dmz-datasaker-io" {
id = "sgrule-3686484473"
# (7 unchanged attributes hidden)
}
 # aws_security_group_rule.sgr-from-115-178-73-91--32-ingress-tcp-22to22-dmz-datasaker-io has changed
 ~ resource "aws_security_group_rule" "sgr-from-115-178-73-91--32-ingress-tcp-22to22-dmz-datasaker-io" {
id = "sgrule-807868327"
# (7 unchanged attributes hidden)
}
 # aws_security_group_rule.sgr-to-0-0-0-0--0-egress-icmp-dev-datasaker-io has changed
 ~ resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-dev-datasaker-io" {
id = "sgrule-320777473"
# (7 unchanged attributes hidden)
}
 # aws_security_group_rule.sgr-to-0-0-0-0--0-egress-icmp-iac-datasaker-io has been deleted
 - resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-iac-datasaker-io" {
- cidr_blocks = [
- "0.0.0.0/0",
] -> null
- from_port = 8 -> null
- id = "sgrule-361293936" -> null
- protocol = "icmp" -> null
- security_group_id = "sg-07d769a675ed05b35" -> null
- self = false -> null
- to_port = 8 -> null
- type = "egress" -> null
}
 # aws_subnet.sbn-dev-a has changed
 ~ resource "aws_subnet" "sbn-dev-a" {
id = "subnet-0c875e254456809f7"
~ tags = {
+ "kops.k8s.io/instance-group/dev-data-a" = "true"
+ "kops.k8s.io/instance-group/dev-data-druid-a" = "true"
+ "kops.k8s.io/instance-group/dev-data-kafka-a" = "true"
+ "kops.k8s.io/instance-group/dev-mgmt-a" = "true"
+ "kops.k8s.io/instance-group/dev-process-a" = "true"
+ "kops.k8s.io/instance-group/master-ap-northeast-2a" = "true"
# (6 unchanged elements hidden)
}
~ tags_all = {
+ "kops.k8s.io/instance-group/dev-data-a" = "true"
+ "kops.k8s.io/instance-group/dev-data-druid-a" = "true"
+ "kops.k8s.io/instance-group/dev-data-kafka-a" = "true"
+ "kops.k8s.io/instance-group/dev-mgmt-a" = "true"
+ "kops.k8s.io/instance-group/dev-process-a" = "true"
+ "kops.k8s.io/instance-group/master-ap-northeast-2a" = "true"
# (6 unchanged elements hidden)
}
# (14 unchanged attributes hidden)
}
 # aws_subnet.sbn-dev-b has changed
 ~ resource "aws_subnet" "sbn-dev-b" {
id = "subnet-05672a669943fc12f"
~ tags = {
+ "kops.k8s.io/instance-group/dev-data-b" = "true"
+ "kops.k8s.io/instance-group/dev-data-druid-b" = "true"
+ "kops.k8s.io/instance-group/dev-data-kafka-b" = "true"
+ "kops.k8s.io/instance-group/dev-mgmt-b" = "true"
+ "kops.k8s.io/instance-group/dev-process-b" = "true"
+ "kops.k8s.io/instance-group/master-ap-northeast-2b" = "true"
# (6 unchanged elements hidden)
}
~ tags_all = {
+ "kops.k8s.io/instance-group/dev-data-b" = "true"
+ "kops.k8s.io/instance-group/dev-data-druid-b" = "true"
+ "kops.k8s.io/instance-group/dev-data-kafka-b" = "true"
+ "kops.k8s.io/instance-group/dev-mgmt-b" = "true"
+ "kops.k8s.io/instance-group/dev-process-b" = "true"
+ "kops.k8s.io/instance-group/master-ap-northeast-2b" = "true"
# (6 unchanged elements hidden)
}
# (14 unchanged attributes hidden)
}
 # aws_subnet.sbn-dev-c has changed
 ~ resource "aws_subnet" "sbn-dev-c" {
id = "subnet-0940fd78504acbbde"
~ tags = {
+ "kops.k8s.io/instance-group/dev-data-c" = "true"
+ "kops.k8s.io/instance-group/dev-data-druid-c" = "true"
+ "kops.k8s.io/instance-group/dev-data-kafka-c" = "true"
+ "kops.k8s.io/instance-group/dev-mgmt-c" = "true"
+ "kops.k8s.io/instance-group/dev-process-c" = "true"
+ "kops.k8s.io/instance-group/master-ap-northeast-2c" = "true"
# (6 unchanged elements hidden)
}
~ tags_all = {
+ "kops.k8s.io/instance-group/dev-data-c" = "true"
+ "kops.k8s.io/instance-group/dev-data-druid-c" = "true"
+ "kops.k8s.io/instance-group/dev-data-kafka-c" = "true"
+ "kops.k8s.io/instance-group/dev-mgmt-c" = "true"
+ "kops.k8s.io/instance-group/dev-process-c" = "true"
+ "kops.k8s.io/instance-group/master-ap-northeast-2c" = "true"
# (6 unchanged elements hidden)
}
# (14 unchanged attributes hidden)
}
 # aws_subnet.sbn-iac-a has been deleted
 - resource "aws_subnet" "sbn-iac-a" {
- arn = "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-08330ae1fd7c5d77e" -> null
- assign_ipv6_address_on_creation = false -> null
- availability_zone = "ap-northeast-2a" -> null
- availability_zone_id = "apne2-az1" -> null
- cidr_block = "172.21.16.0/23" -> null
- enable_dns64 = false -> null
- enable_resource_name_dns_a_record_on_launch = true -> null
- enable_resource_name_dns_aaaa_record_on_launch = false -> null
- id = "subnet-08330ae1fd7c5d77e" -> null
- ipv6_native = false -> null
- map_customer_owned_ip_on_launch = false -> null
- map_public_ip_on_launch = false -> null
- owner_id = "508259851457" -> null
- private_dns_hostname_type_on_launch = "resource-name" -> null
- tags = {
- "Name" = "sbn-iac-a.datasaker"
- "SubnetType" = "Private"
- "kubernetes.io/cluster/datasaker" = "owned"
- "kubernetes.io/role/elb" = "1"
- "kubernetes.io/role/internal-elb" = "1"
} -> null
- tags_all = {
- "Name" = "sbn-iac-a.datasaker"
- "SubnetType" = "Private"
- "kubernetes.io/cluster/datasaker" = "owned"
- "kubernetes.io/role/elb" = "1"
- "kubernetes.io/role/internal-elb" = "1"
} -> null
- vpc_id = "vpc-0b6e0b906c678a22f" -> null
}
 # aws_subnet.sbn-iac-b has been deleted
 - resource "aws_subnet" "sbn-iac-b" {
- arn = "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-0c8c4d1df1a2920e4" -> null
- assign_ipv6_address_on_creation = false -> null
- availability_zone = "ap-northeast-2b" -> null
- availability_zone_id = "apne2-az2" -> null
- cidr_block = "172.21.18.0/23" -> null
- enable_dns64 = false -> null
- enable_resource_name_dns_a_record_on_launch = true -> null
- enable_resource_name_dns_aaaa_record_on_launch = false -> null
- id = "subnet-0c8c4d1df1a2920e4" -> null
- ipv6_native = false -> null
- map_customer_owned_ip_on_launch = false -> null
- map_public_ip_on_launch = false -> null
- owner_id = "508259851457" -> null
- private_dns_hostname_type_on_launch = "resource-name" -> null
- tags = {
- "Name" = "sbn-iac-b.datasaker"
- "SubnetType" = "Private"
- "kubernetes.io/cluster/datasaker" = "owned"
- "kubernetes.io/role/elb" = "1"
- "kubernetes.io/role/internal-elb" = "1"
} -> null
- tags_all = {
- "Name" = "sbn-iac-b.datasaker"
- "SubnetType" = "Private"
- "kubernetes.io/cluster/datasaker" = "owned"
- "kubernetes.io/role/elb" = "1"
- "kubernetes.io/role/internal-elb" = "1"
} -> null
- vpc_id = "vpc-0b6e0b906c678a22f" -> null
}
 # aws_subnet.sbn-iac-c has been deleted
 - resource "aws_subnet" "sbn-iac-c" {
- arn = "arn:aws:ec2:ap-northeast-2:508259851457:subnet/subnet-06e724baf7d879769" -> null
- assign_ipv6_address_on_creation = false -> null
- availability_zone = "ap-northeast-2c" -> null
- availability_zone_id = "apne2-az3" -> null
- cidr_block = "172.21.20.0/23" -> null
- enable_dns64 = false -> null
- enable_resource_name_dns_a_record_on_launch = true -> null
- enable_resource_name_dns_aaaa_record_on_launch = false -> null
- id = "subnet-06e724baf7d879769" -> null
- ipv6_native = false -> null
- map_customer_owned_ip_on_launch = false -> null
- map_public_ip_on_launch = false -> null
- owner_id = "508259851457" -> null
- private_dns_hostname_type_on_launch = "resource-name" -> null
- tags = {
- "Name" = "sbn-iac-c.datasaker"
- "SubnetType" = "Private"
- "kubernetes.io/cluster/datasaker" = "owned"
- "kubernetes.io/role/elb" = "1"
- "kubernetes.io/role/internal-elb" = "1"
} -> null
- tags_all = {
- "Name" = "sbn-iac-c.datasaker"
- "SubnetType" = "Private"
- "kubernetes.io/cluster/datasaker" = "owned"
- "kubernetes.io/role/elb" = "1"
- "kubernetes.io/role/internal-elb" = "1"
} -> null
- vpc_id = "vpc-0b6e0b906c678a22f" -> null
}
 # aws_vpc.vpc-datasaker has changed
 ~ resource "aws_vpc" "vpc-datasaker" {
~ dhcp_options_id = "dopt-021d4c0e9e138e269" -> "dopt-086d99ae90c3cde26"
id = "vpc-0b6e0b906c678a22f"
tags = {
"Name" = "vpc-datasaker"
}
# (18 unchanged attributes hidden)
}
 # aws_vpc_dhcp_options.vpc-dhcp-datasaker has changed
 ~ resource "aws_vpc_dhcp_options" "vpc-dhcp-datasaker" {
id = "dopt-086d99ae90c3cde26"
+ netbios_name_servers = []
+ ntp_servers = []
tags = {
"Name" = "vpc-dhcp-datasaker"
}
# (5 unchanged attributes hidden)
}
Unless you have made equivalent changes to your configuration, or ignored the
relevant attributes using ignore_changes, the following plan may include
actions to undo or respond to these changes.

─────────────────────────────────────────────────────────────────────────────
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
~ update in-place

Terraform will perform the following actions:
 # aws_subnet.sbn-dev-a will be updated in-place
 ~ resource "aws_subnet" "sbn-dev-a" {
id = "subnet-0c875e254456809f7"
~ tags = {
- "kops.k8s.io/instance-group/dev-data-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-druid-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-kafka-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-mgmt-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-process-a" = "true" -> null
- "kops.k8s.io/instance-group/master-ap-northeast-2a" = "true" -> null
# (6 unchanged elements hidden)
}
~ tags_all = {
- "kops.k8s.io/instance-group/dev-data-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-druid-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-kafka-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-mgmt-a" = "true" -> null
- "kops.k8s.io/instance-group/dev-process-a" = "true" -> null
- "kops.k8s.io/instance-group/master-ap-northeast-2a" = "true" -> null
# (6 unchanged elements hidden)
}
# (14 unchanged attributes hidden)
}
 # aws_subnet.sbn-dev-b will be updated in-place
 ~ resource "aws_subnet" "sbn-dev-b" {
id = "subnet-05672a669943fc12f"
~ tags = {
- "kops.k8s.io/instance-group/dev-data-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-druid-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-kafka-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-mgmt-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-process-b" = "true" -> null
- "kops.k8s.io/instance-group/master-ap-northeast-2b" = "true" -> null
# (6 unchanged elements hidden)
}
~ tags_all = {
- "kops.k8s.io/instance-group/dev-data-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-druid-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-kafka-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-mgmt-b" = "true" -> null
- "kops.k8s.io/instance-group/dev-process-b" = "true" -> null
- "kops.k8s.io/instance-group/master-ap-northeast-2b" = "true" -> null
# (6 unchanged elements hidden)
}
# (14 unchanged attributes hidden)
}
 # aws_subnet.sbn-dev-c will be updated in-place
 ~ resource "aws_subnet" "sbn-dev-c" {
id = "subnet-0940fd78504acbbde"
~ tags = {
- "kops.k8s.io/instance-group/dev-data-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-druid-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-kafka-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-mgmt-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-process-c" = "true" -> null
- "kops.k8s.io/instance-group/master-ap-northeast-2c" = "true" -> null
# (6 unchanged elements hidden)
}
~ tags_all = {
- "kops.k8s.io/instance-group/dev-data-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-druid-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-data-kafka-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-mgmt-c" = "true" -> null
- "kops.k8s.io/instance-group/dev-process-c" = "true" -> null
- "kops.k8s.io/instance-group/master-ap-northeast-2c" = "true" -> null
# (6 unchanged elements hidden)
}
# (14 unchanged attributes hidden)
}
Plan: 0 to add, 3 to change, 0 to destroy.

─────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't
guarantee to take exactly these actions if you run "terraform apply" now.
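Worth noting about the plan above: the three in-place updates would strip the kops.k8s.io/instance-group/* tags that kops added to the dev subnets outside of Terraform. If that tag drift is expected and should survive future applies, one option is a lifecycle ignore_changes block on the subnet resources; a sketch under that assumption, shown for sbn-dev-a (the same applies to b and c):

resource "aws_subnet" "sbn-dev-a" {
  # ...arguments as defined earlier in this commit...
  lifecycle {
    # Assumption: subnet tags are co-managed by kops, so externally
    # added tags should not be removed on the next terraform apply.
    ignore_changes = [tags]
  }
}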

View File

@@ -0,0 +1,133 @@
resource "aws_route_table" "rt-datasaker-dev" {
tags = {
"Name" = "rt-datasaker-dev"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route" "route-private-rt-datasaker-dev-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.natgw-datasaker.id
route_table_id = aws_route_table.rt-datasaker-dev.id
}
resource "aws_subnet" "sbn-dev-a" {
availability_zone = "ap-northeast-2a"
cidr_block = "172.21.8.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dev-a.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dev-b" {
availability_zone = "ap-northeast-2b"
cidr_block = "172.21.10.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dev-b.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dev-c" {
availability_zone = "ap-northeast-2c"
cidr_block = "172.21.12.0/23"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dev-c.datasaker"
"SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route_table_association" "rta-dev-a" {
route_table_id = aws_route_table.rt-datasaker-dev.id
subnet_id = aws_subnet.sbn-dev-a.id
}
resource "aws_route_table_association" "rta-dev-b" {
route_table_id = aws_route_table.rt-datasaker-dev.id
subnet_id = aws_subnet.sbn-dev-b.id
}
resource "aws_route_table_association" "rta-dev-c" {
route_table_id = aws_route_table.rt-datasaker-dev.id
subnet_id = aws_subnet.sbn-dev-c.id
}
resource "aws_security_group" "sg-dev-datasaker" {
description = "Security group dev-datasaker"
name = "secg-dev-datasaker"
tags = {
"Name" = "sg-dev-datasaker"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 8
protocol = "icmp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 8
type = "ingress"
}
resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 8
protocol = "icmp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 8
type = "egress"
}
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dev-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "tcp"
security_group_id = aws_security_group.sg-dev-datasaker.id
to_port = 65535
type = "egress"
}

View File

@@ -0,0 +1,252 @@
output "sbn_dmz_a_id" {
value = aws_subnet.sbn-dmz-a.id
}
output "sbn_dmz_b_id" {
value = aws_subnet.sbn-dmz-b.id
}
output "sbn_dmz_c_id" {
value = aws_subnet.sbn-dmz-c.id
}
resource "aws_subnet" "sbn-dmz-a" {
availability_zone = "ap-northeast-2a"
cidr_block = "172.21.0.0/24"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dmz-a.datasaker"
"SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dmz-b" {
availability_zone = "ap-northeast-2b"
cidr_block = "172.21.1.0/24"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dmz-b.datasaker"
"SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_subnet" "sbn-dmz-c" {
availability_zone = "ap-northeast-2c"
cidr_block = "172.21.2.0/24"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"Name" = "sbn-dmz-c.datasaker"
"SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route_table_association" "rta-dmz-a" {
route_table_id = aws_route_table.rt-datasaker-pub.id
subnet_id = aws_subnet.sbn-dmz-a.id
}
resource "aws_route_table_association" "rta-dmz-b" {
route_table_id = aws_route_table.rt-datasaker-pub.id
subnet_id = aws_subnet.sbn-dmz-b.id
}
resource "aws_route_table_association" "rta-dmz-c" {
route_table_id = aws_route_table.rt-datasaker-pub.id
subnet_id = aws_subnet.sbn-dmz-c.id
}
resource "aws_security_group" "sg-dmz-datasaker" {
description = "Security group dmz-datasaker"
name = "secg-dmz-datasaker"
tags = {
"Name" = "sg-dmz-datasaker"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
# resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-tcp-22to22-dmz-datasaker-io" {
# cidr_blocks = ["0.0.0.0/0"]
# from_port = 22
# protocol = "tcp"
# security_group_id = aws_security_group.sg-dmz-datasaker.id
# to_port = 22
# type = "ingress"
# }
resource "aws_security_group_rule" "sgr-from-115-178-73-2--32-ingress-tcp-22to22-dmz-datasaker-io" {
cidr_blocks = ["115.178.73.2/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.sg-dmz-datasaker.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "sgr-from-115-178-73-91--32-ingress-tcp-22to22-dmz-datasaker-io" {
cidr_blocks = ["115.178.73.91/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.sg-dmz-datasaker.id
to_port = 22
type = "ingress"
}
# resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-ingress-icmp-dmz-datasaker-io" {
# cidr_blocks = ["0.0.0.0/0"]
# from_port = 8
# protocol = "icmp"
# security_group_id = aws_security_group.sg-dmz-datasaker.id
# to_port = 8
# type = "ingress"
# }
# resource "aws_security_group_rule" "sgr-to-0-0-0-0--0-egress-icmp-dmz-datasaker-io" {
# cidr_blocks = ["0.0.0.0/0"]
# from_port = 8
# protocol = "icmp"
# security_group_id = aws_security_group.sg-dmz-datasaker.id
# to_port = 8
# type = "egress"
# }
resource "aws_security_group_rule" "sgr-from-0-0-0-0--0-engress-tcp-all-dmz-datasaker-io" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "tcp"
security_group_id = aws_security_group.sg-dmz-datasaker.id
to_port = 65535
type = "egress"
}
resource "aws_launch_template" "lt-dmz-bastion-datasaker" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 20
volume_type = "gp3"
}
}
image_id = "ami-0ea5eb4b05645aa8a"
instance_type = "t3.small"
key_name = aws_key_pair.kp-bastion-datasaker.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_protocol_ipv6 = "disabled"
http_put_response_hop_limit = 3
http_tokens = "required"
}
monitoring {
enabled = false
}
name = "lt-dmz-bastion-datasaker"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.sg-dmz-datasaker.id]
}
# tag_specifications {
# resource_type = "instance"
# tags = {
# "Name" = "lt-dmz-bastion-datasaker"
# }
# }
# tag_specifications {
# resource_type = "volume"
# tags = {
# "Name" = "master-ap-northeast-2b.masters.ap-northeast-2.dev.datasaker.io"
# }
# }
tags = {
"Name" = "lt-dmz-bastion-datasaker"
}
# user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2b.masters.ap-northeast-2.dev.datasaker.io_user_data")
}
resource "aws_autoscaling_group" "ag-dmz-bastion-datasaker" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.lt-dmz-bastion-datasaker.id
version = aws_launch_template.lt-dmz-bastion-datasaker.latest_version
}
max_instance_lifetime = 0
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "ag-dmz-bastion-datasaker"
protect_from_scale_in = false
tag {
key = "Name"
propagate_at_launch = true
value = "ag-dmz-bastion-datasaker"
}
vpc_zone_identifier = [aws_subnet.sbn-dmz-a.id, aws_subnet.sbn-dmz-b.id]
}
resource "aws_eip" "eip-natgw-datasaker" {
# instance = aws_instance.web1-ec2.id
vpc = true
tags = {
Name = "eip-natgw-datasaker"
}
}
resource "aws_nat_gateway" "natgw-datasaker" {
allocation_id = aws_eip.eip-natgw-datasaker.id
subnet_id = aws_subnet.sbn-dmz-a.id
tags = {
Name = "natgw-datasaker"
}
# To ensure proper ordering, it is recommended to add an explicit dependency
# on the Internet Gateway for the VPC.
depends_on = [aws_internet_gateway.igw-datasaker]
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,8 @@
{
"version": 4,
"terraform_version": "1.1.9",
"serial": 1058,
"lineage": "0d7102e1-4b04-a7c0-069c-c81a4ba42c0d",
"outputs": {},
"resources": []
}

View File

@@ -0,0 +1,93 @@
terraform {
required_version = ">= 0.15.0"
required_providers {
aws = {
"configuration_aliases" = [aws.files]
"source" = "hashicorp/aws"
"version" = ">= 4.0.0"
}
}
}
provider "aws" {
region = "ap-northeast-2"
}
provider "aws" {
alias = "files"
region = "ap-northeast-2"
}
output "vpc_datasaker_id" {
value = aws_vpc.vpc-datasaker.id
}
output "vpc_datasaker_cidr_block" {
value = aws_vpc.vpc-datasaker.cidr_block
}
resource "aws_vpc" "vpc-datasaker" {
assign_generated_ipv6_cidr_block = true
cidr_block = "172.21.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"Name" = "vpc-datasaker"
}
}
resource "aws_vpc_dhcp_options" "vpc-dhcp-datasaker" {
domain_name = "ap-northeast-2.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"Name" = "vpc-dhcp-datasaker"
}
}
resource "aws_vpc_dhcp_options_association" "vpc-dhcp-asso-datasaker" {
dhcp_options_id = aws_vpc_dhcp_options.vpc-dhcp-datasaker.id
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_internet_gateway" "igw-datasaker" {
tags = {
"Name" = "igw-datasaker"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_key_pair" "kp-bastion-datasaker" {
key_name = "kp-bastion-datasaker"
public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDv9Bk/20f0xHLQN1Mnub0VwsbRw7ggubeUZ+pUVaX9BD7uUud/ITktmTArbabLJLGgWx64la6+6VuQHauzX/cpMp4dVxoaySQDGPsB+V0WnXaq0pWop5BoJaPO75lpk/Kp7NFtn9x3315Rqmis1Df1UrQehMkqunnr2jWkil6iueAckztpsnqxlb8S+uVYiM7C4HsVx8XdOT3WtfUv+hzDlejy11nzi5T4HMT70O107N4g5CrEapluc7M3NfxCFhz5Gxu8P0dfJKLs9fFT4E8DRfGly5/cDcKbiJHSAZYRN6UwKr3z7LAw8aIW8JWflXn1fMZ92qdiT04kN8ZdVzyMpUiWMXJQPrfI2EHT/OHAympzKrXnT98oIqJANE4Eq72OG9Hrb6Tauk8Bde5/v3P9d7m5Zi9tx+01PZ1JQR+1dkJeV3Am6mjKWrxIowKPol2chnARoU7y1rEZGGi+09bD5hUq7KW6z61DUIlCMYF0Oq0IMs/voQP8zqpDmvSPNJc= hsgahm@ws-ubuntu"
tags = {
"Name" = "kp-bastion-datasaker"
}
}
resource "aws_route_table" "rt-datasaker-pub" {
tags = {
"Name" = "rt-datasaker-pub"
}
vpc_id = aws_vpc.vpc-datasaker.id
}
resource "aws_route" "r-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw-datasaker.id
route_table_id = aws_route_table.rt-datasaker-pub.id
}
resource "aws_route" "r-__--0" {
destination_ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.igw-datasaker.id
route_table_id = aws_route_table.rt-datasaker-pub.id
}

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
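This document is the standard EC2 trust policy: it allows the EC2 service to call sts:AssumeRole, which is what lets instances assume the IAM role the policy is attached to. A sketch of how such a file is typically wired into the surrounding Terraform, with the role name and file path hypothetical:

resource "aws_iam_role" "masters-dev" { # hypothetical name
  name               = "masters.dev.datasaker.io"
  assume_role_policy = file("${path.module}/data/aws_iam_role_masters_policy.json") # hypothetical path
}

resource "aws_iam_instance_profile" "masters-dev" {
  name = aws_iam_role.masters-dev.name
  role = aws_iam_role.masters-dev.name
}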

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,273 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::hostedzone/Z072735718G25WNVKU834"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io",
"ec2:CreateAction": [
"CreateSecurityGroup"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:security-group/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:security-group/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io",
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ModifyVolume",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:RegisterTargets",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "ec2:CreateSecurityGroup",
"Effect": "Allow",
"Resource": "arn:aws:ec2:*:*:vpc/*"
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1,50 @@
{
"Statement": [
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/addons/*",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/cluster-completed.spec",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/igconfig/node/*",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/secrets/dockerconfig"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:ModifyNetworkInterfaceAttribute",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}
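The JSON documents above follow the usual kops split: EC2 trust policies that let instances assume a role, and role policies that scope what masters and nodes may do. As a minimal sketch (not part of this commit; resource labels and file names are hypothetical), such documents would be attached to a role in Terraform like this:
# Hypothetical illustration only: wiring a trust policy and a role
# policy document, like the ones above, to an IAM role.
resource "aws_iam_role" "nodes-dev-datasaker-io" {
  name               = "nodes.dev.datasaker.io"            # hypothetical name
  assume_role_policy = file("ec2-assume-role-policy.json") # trust policy document
}

resource "aws_iam_role_policy" "nodes-dev-datasaker-io" {
  name   = "nodes.dev.datasaker.io"                        # hypothetical name
  role   = aws_iam_role.nodes-dev-datasaker-io.id
  policy = file("nodes-role-policy.json")                  # role policy document
}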

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCyfTPnCyr0Typ7yGTcy0LEGa8IH8yESEXa4Qyr85dWrxazTnWO7iYS0Ze6L0GMMO5qZXg/ntJGhI4PYF/WbCZ5KZMRXePyQIVs5pKMvSX4yH2gPIET5c6yTg4ZSIqrZDLBXGEZxMVp/SnNx1tRzxi0plBDtguSy6LZD0C1ue+VeT4oO98EB2T01GOeQp+RlF/theZuEWSWOVfFD0qVdsHIwVlYYlEZR11IrTamabMOVzyw+/8cokA4hgsrrkSrpKQ2YW0evHK1pxZrw+i3YJuHh3hJ0h98Ymw3rpHGec59gXaYT0PQEQvZs9RCrYw8NpCTQrImXR1UVjeeY3KGgpYQXna+WAmkjA+K/JvLmHGeombVJyd3v8330FX+Ob9klgqTWFvwb8Ew4QCcfl5hDAWxvzoJKAoG/TAZd13aNYaZAVkeWB7vPFWZ0brea6sqUJzXqzPwUXa0OirnqEfxMLZoo4tFyfxuVYVK+ScxayBPYJQkhwmTAZ4bj0OfQEw/jJM= hsgahm@ws-ubuntu

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-a
InstanceGroupRole: Node
NodeupConfigHash: Q85TE/V9HxgnA5xKGRYrJfgXmGE5+rCZ1GJASWQ/GPE=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-b
InstanceGroupRole: Node
NodeupConfigHash: kf3dJ1SjdlO0c/UC6L3UzWB73HR/Az7gIc1qy8Koisg=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-c
InstanceGroupRole: Node
NodeupConfigHash: bUQw6p3VmVBXzspF9eyfeIhthTy8JshdVjdM4O3TfGo=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-mgmt-a
InstanceGroupRole: Node
NodeupConfigHash: nnxeoTtGPiOtgDvp7cOTcBrm40EIMijn9OZMlwmlQ6I=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-mgmt-b
InstanceGroupRole: Node
NodeupConfigHash: mN0L7VdMkoLAhv46mATyltMs5Kr9sI4BSgkg8PG+IJc=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-a
InstanceGroupRole: Node
NodeupConfigHash: Iq9X//Sll3FjhJvy7RIuzBvhmFs+AjtCzz8V97KAYWM=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-b
InstanceGroupRole: Node
NodeupConfigHash: t2aCaXecWpS9pwLOKIb8kPih6JP5vPDaz62JVPOnJG8=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-c
InstanceGroupRole: Node
NodeupConfigHash: 9+uvjmv1ysTusIQEDm+zfrqfOfsZs+Sn6hUXJ5jl5xY=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2a
InstanceGroupRole: Master
NodeupConfigHash: ymgfHaOypQ5PSYZMRskqJMEwwq0wytxVeCScbrEYXqQ=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2b
InstanceGroupRole: Master
NodeupConfigHash: LossyPq4Na2LUuvlTDeLrVynoNeEIXBiZozuBvYcGJA=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
  actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
  echo "$1" | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2c
InstanceGroupRole: Master
NodeupConfigHash: 1bFoJNbAl2IeiaRgh7X4jwmCV0ZYcDCAD5B+ZuU7oig=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,252 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2022-09-06T05:44:08Z"
generation: 1
name: dev.datasaker.io
spec:
api:
loadBalancer:
class: Classic
type: Public
authorization:
rbac: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
configStore: s3://clusters.dev.datasaker.io/dev.datasaker.io
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
dnsZone: Z072735718G25WNVKU834
docker:
skipInstall: true
etcdClusters:
- backups:
backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main
cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: main
version: 3.5.4
- backups:
backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events
cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: events
version: 3.5.4
externalDns:
provider: dns-controller
iam:
allowContainerRegistry: true
legacy: false
keyStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
image: registry.k8s.io/dns/k8s-dns-node-cache:1.21.3
memoryRequest: 5Mi
provider: CoreDNS
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
kubernetesApiAccess:
- 115.178.73.2/32
- 115.178.73.91/32
kubernetesVersion: 1.23.10
masterInternalName: api.internal.dev.datasaker.io
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterPublicName: api.dev.datasaker.io
networkCIDR: 172.21.0.0/16
networkID: vpc-03cbb88e181ccb46e
networking:
calico:
encapsulationMode: ipip
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/secrets
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 115.178.73.2/32
- 115.178.73.91/32
subnets:
- cidr: 172.21.1.0/24
id: subnet-021536c4f12971c74
name: ap-northeast-2a
type: Private
zone: ap-northeast-2a
- cidr: 172.21.2.0/24
id: subnet-0c90842daa15aa7c7
name: ap-northeast-2b
type: Private
zone: ap-northeast-2b
- cidr: 172.21.3.0/24
id: subnet-0ae3ab7ae241fe761
name: ap-northeast-2c
type: Private
zone: ap-northeast-2c
- cidr: 172.21.0.0/28
id: subnet-0d762a41fb41d63e5
name: utility-ap-northeast-2a
type: Utility
zone: ap-northeast-2a
- cidr: 172.21.0.16/28
id: subnet-0b4f418020349fb84
name: utility-ap-northeast-2b
type: Utility
zone: ap-northeast-2b
- cidr: 172.21.0.32/28
id: subnet-05b9f4f02955c3307
name: utility-ap-northeast-2c
type: Utility
zone: ap-northeast-2c
topology:
dns:
type: Public
masters: private
nodes: private
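
This Cluster object is the copy kops keeps in the S3 state store (the configBase above). A sketch of the usual review/apply loop with the kops CLI, assuming kops v1.24.x and AWS credentials that can read the state store:

# Inspect, edit, preview, and apply this cluster spec.
export KOPS_STATE_STORE=s3://clusters.dev.datasaker.io
kops get cluster dev.datasaker.io -o yaml   # prints the spec shown above
kops edit cluster dev.datasaker.io          # opens it in $EDITOR
kops update cluster dev.datasaker.io        # dry run: shows pending changes
kops update cluster dev.datasaker.io --yes  # applies them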

View File

@@ -0,0 +1,792 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-attacher-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- csi.storage.k8s.io
resources:
- csinodeinfos
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-provisioner-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- create
- delete
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshots
verbs:
- get
- list
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- get
- list
- apiGroups:
- storage.k8s.io
resources:
- csinodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- watch
- list
- delete
- update
- create
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-resizer-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-snapshotter-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotclasses
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- create
- get
- list
- watch
- update
- delete
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-attacher-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-attacher-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-provisioner-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-provisioner-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-resizer-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-resizer-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-snapshotter-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-snapshotter-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-getter-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-csi-node-role
subjects:
- kind: ServiceAccount
name: ebs-csi-node-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-sa
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: ebs-csi-node
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- node
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --v=2
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
name: ebs-plugin
ports:
- containerPort: 9808
name: healthz
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: kubelet-dir
- mountPath: /csi
name: plugin-dir
- mountPath: /dev
name: device-dir
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock
name: node-driver-registrar
volumeMounts:
- mountPath: /csi
name: plugin-dir
- mountPath: /registration
name: registration-dir
- args:
- --csi-address=/csi/csi.sock
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02
imagePullPolicy: IfNotPresent
name: liveness-probe
volumeMounts:
- mountPath: /csi
name: plugin-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
serviceAccountName: ebs-csi-node-sa
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet
type: Directory
name: kubelet-dir
- hostPath:
path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
type: DirectoryOrCreate
name: plugin-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
name: registration-dir
- hostPath:
path: /dev
type: Directory
name: device-dir
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
kops.k8s.io/managed-by: kops
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
containers:
- args:
- controller
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --k8s-tag-cluster-id=dev.datasaker.io
- --extra-tags=KubernetesCluster=dev.datasaker.io
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: key_id
name: aws-secret
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: access_key
name: aws-secret
optional: true
image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
name: ebs-plugin
ports:
- containerPort: 9808
name: healthz
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
- --feature-gates=Topology=true
- --extra-create-metadata
- --leader-election=true
- --default-fstype=ext4
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119
imagePullPolicy: IfNotPresent
name: csi-provisioner
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
- --leader-election=true
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b
imagePullPolicy: IfNotPresent
name: csi-attacher
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4
imagePullPolicy: IfNotPresent
name: csi-resizer
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=/csi/csi.sock
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02
imagePullPolicy: IfNotPresent
name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccountName: ebs-csi-controller-sa
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: socket-dir
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs.csi.aws.com
spec:
attachRequired: true
podInfoOnMount: false
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
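
Once this addon rolls out, the driver should be registered cluster-wide. A quick sanity check, with the labels taken from the manifests above:

# Confirm the EBS CSI driver and its pods registered.
kubectl get csidriver ebs.csi.aws.com
kubectl -n kube-system get pods -l app.kubernetes.io/name=aws-ebs-csi-driver
kubectl get csinodes   # each node should list ebs.csi.aws.com as a driver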

View File

@@ -0,0 +1,69 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: 530752f323a7573cedaa993ac169181c2d36d70e1cb4950d3c1a3347ac586826
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 1060dbbcbf4f9768081b838e619da1fc3970ef2b86886f8e5c6ff3e2842c2aa3
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
version: 9.99.0
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
version: 9.99.0
- id: k8s-1.23
manifest: leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml
manifestHash: b9c91e09c0f28c9b74ff140b8395d611834c627d698846d625c10975a74a48c4
name: leader-migration.rbac.addons.k8s.io
selector:
k8s-addon: leader-migration.rbac.addons.k8s.io
version: 9.99.0
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 3e67c5934d55a5f5ebbd8a97e428aa6d9749812ba209a3dc1f1cb9449ee75c26
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 9.99.0
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.22
manifest: networking.projectcalico.org/k8s-1.22.yaml
manifestHash: 35704fe8643eb1cf13079a6580590cb32c2b69daf2047787863308fc4c90e88f
name: networking.projectcalico.org
selector:
role.kubernetes.io/networking: "1"
version: 9.99.0
- id: k8s-1.17
manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml
manifestHash: 80c38e6bb751e5c9e58a013b9c09b70d0ca34383d15889e09df214090c52713c
name: aws-ebs-csi-driver.addons.k8s.io
selector:
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
version: 9.99.0
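
Each manifestHash above is the SHA-256 of the rendered addon manifest; an addon is re-applied when its hash changes. A sketch of recomputing one entry by hand. The addons/ prefix under configBase is an assumption based on the standard kops state-store layout:

# Recompute the aws-ebs-csi-driver manifestHash from the state store.
aws s3 cp s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml .
sha256sum k8s-1.17.yaml   # expect 80c38e6bb751e5c9e58a013b9c09b70d0ca34383d15889e09df214090c52713c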

View File

@@ -0,0 +1,385 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.8.6@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- configMap:
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
maxUnavailable: 50%
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: coredns-autoscaler
kops.k8s.io/managed-by: kops
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
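
With CoreDNS serving on the clusterIP declared above (100.64.0.10), in-cluster resolution can be spot-checked from a throwaway pod. busybox:1.28 is pinned because nslookup is broken in newer busybox images:

# Verify cluster DNS answers for the kubernetes Service.
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- \
  nslookup kubernetes.default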

View File

@@ -0,0 +1,140 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.24.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
kops.k8s.io/managed-by: kops
version: v1.24.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z072735718G25WNVKU834
- --internal-ipv4
- --zone=*/*
- -v=2
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/dns-controller:1.24.1@sha256:d0bff3dff30ec695702eb954b7568e3b5aa164f458a70be1d3f5194423ef90a6
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller
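
dns-controller reconciles Route 53 records from dns.alpha.kubernetes.io/* annotations; the kops-controller DaemonSet later in this commit relies on the internal variant. A sketch of requesting a record for an existing Service. The Service name and hostname here are illustrative; only the annotation key is the one dns-controller watches:

# Ask dns-controller to publish an internal record for a Service.
kubectl annotate service my-svc \
  dns.alpha.kubernetes.io/internal=my-svc.internal.dev.datasaker.io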

View File

@@ -0,0 +1,225 @@
apiVersion: v1
data:
config.yaml: |
{"cloud":"aws","configBase":"s3://clusters.dev.datasaker.io/dev.datasaker.io","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.dev.datasaker.io"],"Region":"ap-northeast-2"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.24.1
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.dev.datasaker.io
creationTimestamp: null
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
kops.k8s.io/managed-by: kops
version: v1.24.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
containers:
- args:
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/kops-controller:1.24.1@sha256:dec29a983e633e2d3321fef86e6fea211784b2dc9b62ce735d708e781ef4919c
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
runAsUser: 10011
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: kops-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller

View File

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api

View File

@@ -0,0 +1,52 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: leader-migration.rbac.addons.k8s.io
name: system::leader-locking-migration
namespace: kube-system
rules:
- apiGroups:
- coordination.k8s.io
resourceNames:
- cloud-provider-extraction-migration
resources:
- leases
verbs:
- create
- list
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: leader-migration.rbac.addons.k8s.io
name: system::leader-locking-migration
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system::leader-locking-migration
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:kube-controller-manager
- kind: ServiceAccount
name: kube-controller-manager
namespace: kube-system
- kind: ServiceAccount
name: aws-cloud-controller-manager
namespace: kube-system
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container
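
Effect of this LimitRange: a container created in the default namespace without an explicit CPU request is admitted with requests.cpu: 100m. A quick demonstration; the pod name is illustrative:

# Watch the 100m default request get injected.
kubectl run lr-test --image=busybox:1.28 --restart=Never -- sleep 3600
kubectl get pod lr-test \
  -o jsonpath='{.spec.containers[0].resources.requests.cpu}'   # prints 100m
kubectl delete pod lr-test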

View File

@@ -0,0 +1,118 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-csi-1-21
parameters:
encrypted: "true"
type: gp3
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system
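
kops-csi-1-21 is the default class here (is-default-class: "true"), so a PVC with no storageClassName lands on the CSI driver with encrypted gp3 volumes. A sketch; the claim name is illustrative, and with WaitForFirstConsumer it stays Pending until a pod consumes it:

# Create a PVC against the default (kops-csi-1-21) class.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-gp3
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
EOF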

View File

@@ -0,0 +1,4 @@
{
"memberCount": 3,
"etcdVersion": "3.5.4"
}

View File

@@ -0,0 +1,4 @@
{
"memberCount": 3,
"etcdVersion": "3.5.4"
}

View File

@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 --peer-urls=https://__name__:2381
--quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events
--volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142
name: etcd-manager
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
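  # Note: inside this container the manager writes to /var/log/etcd.log (see
  # volumeMounts above); the hostPath below maps that file to
  # /var/log/etcd-events.log on the node, keeping the two clusters' logs apart.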
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380
--quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.24.1@sha256:b969a40a66d7c9781b8f393c4bd1cc90828c45b0419e24bf2192be9a10fd6c44
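    # kops sidecar: serves /.kube-apiserver-healthcheck/healthz on localhost:3990
    # and checks the apiserver using the client cert/key mounted from /secrets.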
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
securityContext:
runAsNonRoot: true
runAsUser: 10012
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
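  # Hard eviction triggers when free memory drops below 100Mi or when node/image
  # filesystem space or inode headroom falls under the percentages listed above.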
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
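
The nodeLabels in these nodeup configs are applied by the kubelet at registration, so workloads can be pinned to an instance group. A minimal sketch (pod name and image are placeholders, not part of this commit):

apiVersion: v1
kind: Pod
metadata:
  name: data-workload                  # hypothetical name
spec:
  nodeSelector:
    datasaker/group: data              # label set by the config above
  containers:
  - name: app
    image: registry.k8s.io/pause:3.6   # placeholder image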

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-c
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: mgmt
kops.k8s.io/instancegroup: dev-mgmt-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: mgmt
kops.k8s.io/instancegroup: dev-mgmt-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7140152701493782195543542031"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-c
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,265 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
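    # Event objects are routed to the dedicated events etcd on :4002; all other
    # resources use the main etcd on :4001 listed under etcdServers.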
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsE+G9XrtFbtxfNVKq0xV
J9N2CW7fr8zQAbCBCwOw6KaSnK6qdmlBnm6y2jua6mZpt9BsYeimXO9YQmmZH5vc
terv+xW9LNsR8stv8sWIpnWl2NKn+Y5tO6PCJTqaYeBWIxVZC4q5Ly0YDxa1J6Qo
blcN1TMyohiWYppsPB/FfrIImgHjH9u3BfQHKPTsq+AzO9fC72mbqm2PIFkYVvuW
XPb7KQs7eWEC2tp+RlB6qhCctlARp5mN0px1vrD/X8CzOyde8ofhpntE/8jpfQcz
qNhQ6mMhDhYhUEJV+tlq+Q/RpLtP3af77RfvCfnyxN3LRCPKGOYK5F/fENr/5hqY
OwIDAQAB
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
- be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64
- ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
- 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64
- 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIDDDCCAfSgAwIBAgIMFxIyJYeMuVeJOZ/QMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkwNDA2MzkwOVoX
DTMyMDkwMzA2MzkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJJdVBOGSht7BkQVLj
l+lEyyiTe63lIWQhDpWgWqvE2OpNHhp2bdIYSOk7+/rFczt0lc0bCvFZLCZ7gnXT
INQZLWGBWbraQPoB8letkjYgxvTvAMaxtA/5lNW+zuitAJvXVYZEVR2xVw2EQHnu
OATzRM3mnlig7I2MARmUn5gZeGuMof7Aqh1e051Dsa579mRSDQTVoP19cjTslGU3
PsBbTx9IYJXPFJETa8BxYQv11ejT1mJIDAZ4M9bWBWZFRnPhtzQUDcqUBZmWdqkx
KcjfMXRoKZQDALfDeUOv0nEkgbzkIE04haUvbPiWKfSzzd1ILumW2nH6zzHaXGmv
fSStAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBTcw+Hd3Bl1EPbzet1X5psLukt+9TANBgkqhkiG9w0BAQsFAAOCAQEA
Es2LiPmZkRUN3VA+H0p9QiCXX5iTvWoQroGy1NCrYBx3LPK3fP2ZnmG6KNRC2rxm
gmrjhWx+eCJNS7El2ZLHmDmqekiKfmcFPnb/buVLWv0NdBHdVPdZVrLUD5tzO7UJ
TBjuGwiraovMYNLGB9YqPDjnHzL9o9QkL98G3Q3BxLwkputU77Xgot7khCDbmBAR
Ey6UAxL0E4vYF8Oz8KBwC3xBXFPUNClKafbYsKZim5bAw7VA0hFETmC7n6kmHcmo
TYkKDnepzq+wM0d52gvSMKPXx+2OjIXs0h0a5a34TmPd0qm7wj3OJAhCPL9wE3Vt
xAs2TdYn8CrGqWBeqo0hBw==
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIC/DCCAeSgAwIBAgIMFxIyJYiDj+oMvtm9MA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5MDMw
NjM5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMBrFSCeEEd3fqbwfK7IRQ/m/LlVaL7EMMmDs9a2
rrbzbHCJzHjt8oqo4whqwfL9/Ure7C1baFzEme2OxS4QK/MSJDpv/W+wKg+n5Yh3
zl8Aj07T6vjNGITDWalIZhAO7LeraOcF+m985cIFGOHYtiAWD0Ii7hpLw5rX4xTK
XcWQ74TjfDlemJCHeDe60Lx6pZFPVqMm2NbI4DT/PtvrObq5gls7F2G2T30gJ84/
8O1+ZlOg6/P0God8eZPSUT/A3itTNhoxqMphOJpm7KhMA/JC2MxadOlRCUPoC5JN
ZSTt62F9hkd1fYJ2pBfUb2on495yOsRTvXVpGkh4+8LJxBsCAwEAAaNCMEAwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFO1Ee5HNrlH
mneOqWSS/C4DBwDnMA0GCSqGSIb3DQEBCwUAA4IBAQCv6jgy7DRQjitHGTgiqay5
LCe2LtFPgksE1yVb07T7eUYqKZh8qZl8vUZWOynPBTLkCrHjPTvoS34aWtO7CZSV
oiGl6CNTU8l8srUzkgp6svVIVifBGuGX2btoju17dnzNtNIjjdr8wPaXiEYxvDOT
o1YVksVw0fZfw7G0TYfQVpAN0eiZdd6j/7AKNADkpjaAkHp0pPYNDWQO6Fa4VK5L
0ZD+tuoWr9I28izE7cBO0lx5nvMK7W28hZh6E0tGHfkej4rx2N7dMkO3SDbi+kVG
X9tB7+bqt9lO62vqMGFWCeqS0zcmF1l+a0lN532ni7H5UeEGZ+A9R1cnPBni5JgS
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDCjCCAfKgAwIBAgIMFxIyJYjFsQalPe9GMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTA0MDYzOTA5WhcN
MzIwOTAzMDYzOTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5tZ6eB7tCCiTOTh8eOsh
91Uv+2Pvq9TINuiKnFIy0jlRQ+q6M4vU03Gjf/KdNfKlHmYqDrFNeCgyuiv87G74
9oojSlx7NuBt2TXRgw7YetAep5B34BUMu6+PnWtE9zCNi4JSWbZlT66KyaghfpJU
187733VPK5TRnr6zbYWHFVYigau+fm3BpfA5gKqWqaXEC0JeuHptSNnn4K8z1fRN
Ay2PUeEtPV46jazTj+P5SMjueziHBfkXQCkwfeUaXq+ALETMhjKdZlnsWOQqdz5i
c08jpXbWXo0UmFgpu4ohMfHqU34v8Umcyk1q1yTyXnSM1/DPiL/xAHjAXLf2hjIH
yQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQUCoe5yB4CMAyieDVvCN5JFGpNHmUwDQYJKoZIhvcNAQELBQADggEBAKfo
hXZIwMMaQrCCE3RUfvspCsHsPOkX4Tt6EqTRAhh2it3+8t5ECW9upcycc85UDzJR
vJs0AHk4PwYv2AVgr5rVmTlww502dIQz+JiWKTLgjyOD/fpWOchYKZMO/xHsY55O
eKyFngIlvTKcOPvrrVINm6waf54lDH+t4J4fb/8P49HC4JZupFdHWRQiFsYoSMY8
TdNrNbMninl9jua+oUw6Tfib7iOtWZN3C1EIr5bKLHTZwGTjmhq2s4JHoew6V9My
27yq06SiVZflTAv78J3RdCp/HT7UjsncL6U4M5rXvN7Zi6gO4E9BSw2yypvtdiWS
otB/s616SciuS4GfxB8=
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxIyJYikJ+CvzSfFMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkwNDA2MzkwOVoXDTMy
MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKh9PSkmnXpG4UlVPGRbeQ7BZS
EKxKXJTTMIqhCXCrfxPVE9gKRe8Qfq9WIVURFy60Q2ot9df6VUj73MVCm7CQOJ5s
jqJVDRpcNpVANJJCElxAVzelQf0K0oyxeVL8f0bX9zYnxoddR41bBvUPz9lg/01F
GSPk1IwbDJ95I8vQD+WS4aGJ1JW7CSE2Q6VfeOdxYRwzD4yhkit/ixhQNG0tLa1r
CQyIz8/bGT49efyP5zLTRe55hAkwVZmbzGcOFcjfkd6oLb3AiuU1DuitI455wM3L
b9ds59DGyxmPMH0qoyGdK0JScZp4j4jv/wPHjafg/NVq1/v0nRlv+/mojTWHAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBTrP+8tkkcWH5BAUsADSecDgM+rbjANBgkqhkiG9w0BAQsFAAOCAQEAj6qP+x9/
em1MUrypgL+XnvxlBmRktBWEoRz6fldaP7HIN4pNi5OBA0rVdS74BUwkTSoy3AVd
4SSfNdTvqXVEwUfZbopZYcTuemAd5NHo9nycTO5Tse07NuqcxpQ4dTpz3K2iB50h
+GJYKx+W0IHPb/+Pq+ZPXqFcdKFjPGbtZfOuVDffyBaTHCGmkSV/cgG5Zfi3c9Ep
kvK0j8QhcJ5gahqUoum8lDRHJBscUId74qnEXZpwEx0yBk4cPxGdw1M7DnREeVNU
98hAbdeRpgDzXoMR0yNCikTOwk/aU4OhEJUWiaLfDSvMFznG2OdNgP71afsRNRrR
CnTy7QvfVnofyg==
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxIyJYeC/qJ4t7uwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkwNDA2MzkwOVoXDTMy
MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuXpCZLE//rM01hgLinOg3mLY
3PVsZLCOZgV+KvsuXQEwj/a0E9w6v22KrxJQic4Al8ebFcfxJ4UzB0GSAKazdj3B
Q60WYIx+4/8uLNyEsR49jiCCbNHvjTYsGeC1EiXXN2h6aeJJ/L6y9YxFaArZ13Op
wZhtA+0ubPkaMYKsWdVcipJwNH5PB1v/8JogKshTwMN506XfmkGcydIl+i9yhX4s
NgwkjXgrMNlgvccswSzRn/CPqhqcOgNe0zbonL6pFBju0KC0zqyFODpnpMwrfPMC
HIxLdQpFd2zDV30mSu0/TRILhI4dYa+/gC7ucdzJiHVjE1FXpUDUgT8sIVGpAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBSDAp7bV5g/3hpTUO/Ebaf3pw30yjANBgkqhkiG9w0BAQsFAAOCAQEAc1RU66y/
JBGVeC6Xo86LFDfIYHcd9XA5J07l32NHwMvc7dv+wpG1funG1pi/ucdCh9HXzu+v
tx3QcG/a61YKJiJ2btJWNvkoqKdzkHBbr4kBcOHTWmq8XLUFBq3pVYMy7P/HZiTK
BhRDLwHE5qQO9IxjyqloMlc/WOVVrfieHIHHRg0mvAs0j6DJR1axqnKpgytV/sTy
fwnHV+RNOh8oy33/aeHfgZ0kJejRFmUC3+fTzI1onmaJXD1UHZfMElrHrvCW76eC
T+Zfllo7km3Oyje+2B4W76/q2G8nyT8rFxo9+nB6RGVGslPYLlbF0cFLCCC998HR
5SKrimFkB4A+pg==
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDAjCCAeqgAwIBAgIMFxIyJYfejUTVi0qSMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5
MDMwNjM5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOUHtUkjT+GYZDQQlIo++9JgKrI+eHjY
WeUH6IREmQYGGJCPkWxWI0DaB1glglMlJU4hTa1BHhnu+Vlzj3vOx6G9EiatRBRa
CEcZiSEnc4Tvr91lQeRSSApZ76CnL/7Tua74sy3YKGgmjlfN5I6gQBVvXs9JYCph
IWakWb5e3+5VrUm4cfH8fLB+7RnGe+uVG5UCE5yQ5Z2KsvYSJWe/NmDpWCn1tKAp
snnmsCHbeEb5OARTEFAXqxRSFRiCyzbDdFMvGKU+SOQfXf3EKeZ5GybfZib9Oe3c
0IkqcImxloZafpnpqGeH+YzAKrG+54LcQQ0nxH0/uO/89mIE1acSTyUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFww
+ykQMnt4PEIJrJezPIlpmYzBMA0GCSqGSIb3DQEBCwUAA4IBAQBzY4BuomR8pAAs
rkDW3pkZaNXi+EZl6FS7k0E9RR2eQrm5BzwWB1zXcuLLXab7yL0HNPwQr9whXTrV
RXaWZTGTSlFbKom9rEL6Lx1w+dnjT+V+irWhevfXh1MEC8S9Hpi5/kWr7Cov+Pf0
3nuTgKc1ZtzkT3+whDVCispuwTVPme6x7x1nR2fMgzW/9kfNe9wx1pD4K1uHmQ1R
WcR1tkAoLK6CPaUmHU5jUh8HFcl1V/vXycKr1R8lzvcv9gDXbgh/3kohZazzeBBW
SfA7verwMTrVGgia/+m57N3F5l3BwGM8rj5ncFynqZPE2GSdVrK4xMnkhVcq/wC+
X0c+UsfH
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "7140152701265059592804081616"
etcd-clients-ca: "7140152701334538361835018685"
etcd-manager-ca-events: "7140152701353152116999188294"
etcd-manager-ca-main: "7140152701343712646643132357"
etcd-peers-ca-events: "7140152701262321031184890800"
etcd-peers-ca-main: "7140152701288092082058775186"
kubernetes-ca: "7140152701493782195543542031"
service-account: "7140152701518733293461068249"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/instancegroup: master-ap-northeast-2a
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
etcdManifests:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
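
Given the serviceAccountIssuer and apiAudiences settings in this control-plane config, pods can request short-lived, audience-scoped tokens through a projected volume; a minimal sketch (pod name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: token-demo                     # hypothetical name
spec:
  containers:
  - name: app
    image: registry.k8s.io/pause:3.6   # placeholder image
    volumeMounts:
    - mountPath: /var/run/secrets/tokens
      name: api-token
  volumes:
  - name: api-token
    projected:
      sources:
      - serviceAccountToken:
          audience: kubernetes.svc.default   # matches apiAudiences above
          expirationSeconds: 3600
          path: token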

View File

@@ -0,0 +1,265 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsE+G9XrtFbtxfNVKq0xV
J9N2CW7fr8zQAbCBCwOw6KaSnK6qdmlBnm6y2jua6mZpt9BsYeimXO9YQmmZH5vc
terv+xW9LNsR8stv8sWIpnWl2NKn+Y5tO6PCJTqaYeBWIxVZC4q5Ly0YDxa1J6Qo
blcN1TMyohiWYppsPB/FfrIImgHjH9u3BfQHKPTsq+AzO9fC72mbqm2PIFkYVvuW
XPb7KQs7eWEC2tp+RlB6qhCctlARp5mN0px1vrD/X8CzOyde8ofhpntE/8jpfQcz
qNhQ6mMhDhYhUEJV+tlq+Q/RpLtP3af77RfvCfnyxN3LRCPKGOYK5F/fENr/5hqY
OwIDAQAB
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
- be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64
- ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
- 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64
- 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIDDDCCAfSgAwIBAgIMFxIyJYeMuVeJOZ/QMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkwNDA2MzkwOVoX
DTMyMDkwMzA2MzkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJJdVBOGSht7BkQVLj
l+lEyyiTe63lIWQhDpWgWqvE2OpNHhp2bdIYSOk7+/rFczt0lc0bCvFZLCZ7gnXT
INQZLWGBWbraQPoB8letkjYgxvTvAMaxtA/5lNW+zuitAJvXVYZEVR2xVw2EQHnu
OATzRM3mnlig7I2MARmUn5gZeGuMof7Aqh1e051Dsa579mRSDQTVoP19cjTslGU3
PsBbTx9IYJXPFJETa8BxYQv11ejT1mJIDAZ4M9bWBWZFRnPhtzQUDcqUBZmWdqkx
KcjfMXRoKZQDALfDeUOv0nEkgbzkIE04haUvbPiWKfSzzd1ILumW2nH6zzHaXGmv
fSStAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBTcw+Hd3Bl1EPbzet1X5psLukt+9TANBgkqhkiG9w0BAQsFAAOCAQEA
Es2LiPmZkRUN3VA+H0p9QiCXX5iTvWoQroGy1NCrYBx3LPK3fP2ZnmG6KNRC2rxm
gmrjhWx+eCJNS7El2ZLHmDmqekiKfmcFPnb/buVLWv0NdBHdVPdZVrLUD5tzO7UJ
TBjuGwiraovMYNLGB9YqPDjnHzL9o9QkL98G3Q3BxLwkputU77Xgot7khCDbmBAR
Ey6UAxL0E4vYF8Oz8KBwC3xBXFPUNClKafbYsKZim5bAw7VA0hFETmC7n6kmHcmo
TYkKDnepzq+wM0d52gvSMKPXx+2OjIXs0h0a5a34TmPd0qm7wj3OJAhCPL9wE3Vt
xAs2TdYn8CrGqWBeqo0hBw==
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIC/DCCAeSgAwIBAgIMFxIyJYiDj+oMvtm9MA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5MDMw
NjM5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMBrFSCeEEd3fqbwfK7IRQ/m/LlVaL7EMMmDs9a2
rrbzbHCJzHjt8oqo4whqwfL9/Ure7C1baFzEme2OxS4QK/MSJDpv/W+wKg+n5Yh3
zl8Aj07T6vjNGITDWalIZhAO7LeraOcF+m985cIFGOHYtiAWD0Ii7hpLw5rX4xTK
XcWQ74TjfDlemJCHeDe60Lx6pZFPVqMm2NbI4DT/PtvrObq5gls7F2G2T30gJ84/
8O1+ZlOg6/P0God8eZPSUT/A3itTNhoxqMphOJpm7KhMA/JC2MxadOlRCUPoC5JN
ZSTt62F9hkd1fYJ2pBfUb2on495yOsRTvXVpGkh4+8LJxBsCAwEAAaNCMEAwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFO1Ee5HNrlH
mneOqWSS/C4DBwDnMA0GCSqGSIb3DQEBCwUAA4IBAQCv6jgy7DRQjitHGTgiqay5
LCe2LtFPgksE1yVb07T7eUYqKZh8qZl8vUZWOynPBTLkCrHjPTvoS34aWtO7CZSV
oiGl6CNTU8l8srUzkgp6svVIVifBGuGX2btoju17dnzNtNIjjdr8wPaXiEYxvDOT
o1YVksVw0fZfw7G0TYfQVpAN0eiZdd6j/7AKNADkpjaAkHp0pPYNDWQO6Fa4VK5L
0ZD+tuoWr9I28izE7cBO0lx5nvMK7W28hZh6E0tGHfkej4rx2N7dMkO3SDbi+kVG
X9tB7+bqt9lO62vqMGFWCeqS0zcmF1l+a0lN532ni7H5UeEGZ+A9R1cnPBni5JgS
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDCjCCAfKgAwIBAgIMFxIyJYjFsQalPe9GMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTA0MDYzOTA5WhcN
MzIwOTAzMDYzOTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5tZ6eB7tCCiTOTh8eOsh
91Uv+2Pvq9TINuiKnFIy0jlRQ+q6M4vU03Gjf/KdNfKlHmYqDrFNeCgyuiv87G74
9oojSlx7NuBt2TXRgw7YetAep5B34BUMu6+PnWtE9zCNi4JSWbZlT66KyaghfpJU
187733VPK5TRnr6zbYWHFVYigau+fm3BpfA5gKqWqaXEC0JeuHptSNnn4K8z1fRN
Ay2PUeEtPV46jazTj+P5SMjueziHBfkXQCkwfeUaXq+ALETMhjKdZlnsWOQqdz5i
c08jpXbWXo0UmFgpu4ohMfHqU34v8Umcyk1q1yTyXnSM1/DPiL/xAHjAXLf2hjIH
yQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQUCoe5yB4CMAyieDVvCN5JFGpNHmUwDQYJKoZIhvcNAQELBQADggEBAKfo
hXZIwMMaQrCCE3RUfvspCsHsPOkX4Tt6EqTRAhh2it3+8t5ECW9upcycc85UDzJR
vJs0AHk4PwYv2AVgr5rVmTlww502dIQz+JiWKTLgjyOD/fpWOchYKZMO/xHsY55O
eKyFngIlvTKcOPvrrVINm6waf54lDH+t4J4fb/8P49HC4JZupFdHWRQiFsYoSMY8
TdNrNbMninl9jua+oUw6Tfib7iOtWZN3C1EIr5bKLHTZwGTjmhq2s4JHoew6V9My
27yq06SiVZflTAv78J3RdCp/HT7UjsncL6U4M5rXvN7Zi6gO4E9BSw2yypvtdiWS
otB/s616SciuS4GfxB8=
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxIyJYikJ+CvzSfFMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkwNDA2MzkwOVoXDTMy
MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKh9PSkmnXpG4UlVPGRbeQ7BZS
EKxKXJTTMIqhCXCrfxPVE9gKRe8Qfq9WIVURFy60Q2ot9df6VUj73MVCm7CQOJ5s
jqJVDRpcNpVANJJCElxAVzelQf0K0oyxeVL8f0bX9zYnxoddR41bBvUPz9lg/01F
GSPk1IwbDJ95I8vQD+WS4aGJ1JW7CSE2Q6VfeOdxYRwzD4yhkit/ixhQNG0tLa1r
CQyIz8/bGT49efyP5zLTRe55hAkwVZmbzGcOFcjfkd6oLb3AiuU1DuitI455wM3L
b9ds59DGyxmPMH0qoyGdK0JScZp4j4jv/wPHjafg/NVq1/v0nRlv+/mojTWHAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBTrP+8tkkcWH5BAUsADSecDgM+rbjANBgkqhkiG9w0BAQsFAAOCAQEAj6qP+x9/
em1MUrypgL+XnvxlBmRktBWEoRz6fldaP7HIN4pNi5OBA0rVdS74BUwkTSoy3AVd
4SSfNdTvqXVEwUfZbopZYcTuemAd5NHo9nycTO5Tse07NuqcxpQ4dTpz3K2iB50h
+GJYKx+W0IHPb/+Pq+ZPXqFcdKFjPGbtZfOuVDffyBaTHCGmkSV/cgG5Zfi3c9Ep
kvK0j8QhcJ5gahqUoum8lDRHJBscUId74qnEXZpwEx0yBk4cPxGdw1M7DnREeVNU
98hAbdeRpgDzXoMR0yNCikTOwk/aU4OhEJUWiaLfDSvMFznG2OdNgP71afsRNRrR
CnTy7QvfVnofyg==
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxIyJYeC/qJ4t7uwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkwNDA2MzkwOVoXDTMy
MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuXpCZLE//rM01hgLinOg3mLY
3PVsZLCOZgV+KvsuXQEwj/a0E9w6v22KrxJQic4Al8ebFcfxJ4UzB0GSAKazdj3B
Q60WYIx+4/8uLNyEsR49jiCCbNHvjTYsGeC1EiXXN2h6aeJJ/L6y9YxFaArZ13Op
wZhtA+0ubPkaMYKsWdVcipJwNH5PB1v/8JogKshTwMN506XfmkGcydIl+i9yhX4s
NgwkjXgrMNlgvccswSzRn/CPqhqcOgNe0zbonL6pFBju0KC0zqyFODpnpMwrfPMC
HIxLdQpFd2zDV30mSu0/TRILhI4dYa+/gC7ucdzJiHVjE1FXpUDUgT8sIVGpAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBSDAp7bV5g/3hpTUO/Ebaf3pw30yjANBgkqhkiG9w0BAQsFAAOCAQEAc1RU66y/
JBGVeC6Xo86LFDfIYHcd9XA5J07l32NHwMvc7dv+wpG1funG1pi/ucdCh9HXzu+v
tx3QcG/a61YKJiJ2btJWNvkoqKdzkHBbr4kBcOHTWmq8XLUFBq3pVYMy7P/HZiTK
BhRDLwHE5qQO9IxjyqloMlc/WOVVrfieHIHHRg0mvAs0j6DJR1axqnKpgytV/sTy
fwnHV+RNOh8oy33/aeHfgZ0kJejRFmUC3+fTzI1onmaJXD1UHZfMElrHrvCW76eC
T+Zfllo7km3Oyje+2B4W76/q2G8nyT8rFxo9+nB6RGVGslPYLlbF0cFLCCC998HR
5SKrimFkB4A+pg==
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDAjCCAeqgAwIBAgIMFxIyJYfejUTVi0qSMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5
MDMwNjM5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOUHtUkjT+GYZDQQlIo++9JgKrI+eHjY
WeUH6IREmQYGGJCPkWxWI0DaB1glglMlJU4hTa1BHhnu+Vlzj3vOx6G9EiatRBRa
CEcZiSEnc4Tvr91lQeRSSApZ76CnL/7Tua74sy3YKGgmjlfN5I6gQBVvXs9JYCph
IWakWb5e3+5VrUm4cfH8fLB+7RnGe+uVG5UCE5yQ5Z2KsvYSJWe/NmDpWCn1tKAp
snnmsCHbeEb5OARTEFAXqxRSFRiCyzbDdFMvGKU+SOQfXf3EKeZ5GybfZib9Oe3c
0IkqcImxloZafpnpqGeH+YzAKrG+54LcQQ0nxH0/uO/89mIE1acSTyUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFww
+ykQMnt4PEIJrJezPIlpmYzBMA0GCSqGSIb3DQEBCwUAA4IBAQBzY4BuomR8pAAs
rkDW3pkZaNXi+EZl6FS7k0E9RR2eQrm5BzwWB1zXcuLLXab7yL0HNPwQr9whXTrV
RXaWZTGTSlFbKom9rEL6Lx1w+dnjT+V+irWhevfXh1MEC8S9Hpi5/kWr7Cov+Pf0
3nuTgKc1ZtzkT3+whDVCispuwTVPme6x7x1nR2fMgzW/9kfNe9wx1pD4K1uHmQ1R
WcR1tkAoLK6CPaUmHU5jUh8HFcl1V/vXycKr1R8lzvcv9gDXbgh/3kohZazzeBBW
SfA7verwMTrVGgia/+m57N3F5l3BwGM8rj5ncFynqZPE2GSdVrK4xMnkhVcq/wC+
X0c+UsfH
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "7140152701265059592804081616"
etcd-clients-ca: "7140152701334538361835018685"
etcd-manager-ca-events: "7140152701353152116999188294"
etcd-manager-ca-main: "7140152701343712646643132357"
etcd-peers-ca-events: "7140152701262321031184890800"
etcd-peers-ca-main: "7140152701288092082058775186"
kubernetes-ca: "7140152701493782195543542031"
service-account: "7140152701518733293461068249"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/instancegroup: master-ap-northeast-2b
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
etcdManifests:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
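The CAs block above embeds each cluster CA as base64 PEM text. A minimal sketch for inspecting one of them, assuming the kubernetes-ca block is saved verbatim (BEGIN/END lines included) to kubernetes-ca.crt; the file name is illustrative:
# Print the subject and validity window of the embedded CA.
# The subject should match the key name, e.g. CN = kubernetes-ca.
openssl x509 -in kubernetes-ca.crt -noout -subject -dates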


@@ -0,0 +1,265 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsE+G9XrtFbtxfNVKq0xV
J9N2CW7fr8zQAbCBCwOw6KaSnK6qdmlBnm6y2jua6mZpt9BsYeimXO9YQmmZH5vc
terv+xW9LNsR8stv8sWIpnWl2NKn+Y5tO6PCJTqaYeBWIxVZC4q5Ly0YDxa1J6Qo
blcN1TMyohiWYppsPB/FfrIImgHjH9u3BfQHKPTsq+AzO9fC72mbqm2PIFkYVvuW
XPb7KQs7eWEC2tp+RlB6qhCctlARp5mN0px1vrD/X8CzOyde8ofhpntE/8jpfQcz
qNhQ6mMhDhYhUEJV+tlq+Q/RpLtP3af77RfvCfnyxN3LRCPKGOYK5F/fENr/5hqY
OwIDAQAB
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
- be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64
- ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
- 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64
- 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIDDDCCAfSgAwIBAgIMFxIyJYeMuVeJOZ/QMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkwNDA2MzkwOVoX
DTMyMDkwMzA2MzkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJJdVBOGSht7BkQVLj
l+lEyyiTe63lIWQhDpWgWqvE2OpNHhp2bdIYSOk7+/rFczt0lc0bCvFZLCZ7gnXT
INQZLWGBWbraQPoB8letkjYgxvTvAMaxtA/5lNW+zuitAJvXVYZEVR2xVw2EQHnu
OATzRM3mnlig7I2MARmUn5gZeGuMof7Aqh1e051Dsa579mRSDQTVoP19cjTslGU3
PsBbTx9IYJXPFJETa8BxYQv11ejT1mJIDAZ4M9bWBWZFRnPhtzQUDcqUBZmWdqkx
KcjfMXRoKZQDALfDeUOv0nEkgbzkIE04haUvbPiWKfSzzd1ILumW2nH6zzHaXGmv
fSStAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBTcw+Hd3Bl1EPbzet1X5psLukt+9TANBgkqhkiG9w0BAQsFAAOCAQEA
Es2LiPmZkRUN3VA+H0p9QiCXX5iTvWoQroGy1NCrYBx3LPK3fP2ZnmG6KNRC2rxm
gmrjhWx+eCJNS7El2ZLHmDmqekiKfmcFPnb/buVLWv0NdBHdVPdZVrLUD5tzO7UJ
TBjuGwiraovMYNLGB9YqPDjnHzL9o9QkL98G3Q3BxLwkputU77Xgot7khCDbmBAR
Ey6UAxL0E4vYF8Oz8KBwC3xBXFPUNClKafbYsKZim5bAw7VA0hFETmC7n6kmHcmo
TYkKDnepzq+wM0d52gvSMKPXx+2OjIXs0h0a5a34TmPd0qm7wj3OJAhCPL9wE3Vt
xAs2TdYn8CrGqWBeqo0hBw==
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIC/DCCAeSgAwIBAgIMFxIyJYiDj+oMvtm9MA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5MDMw
NjM5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMBrFSCeEEd3fqbwfK7IRQ/m/LlVaL7EMMmDs9a2
rrbzbHCJzHjt8oqo4whqwfL9/Ure7C1baFzEme2OxS4QK/MSJDpv/W+wKg+n5Yh3
zl8Aj07T6vjNGITDWalIZhAO7LeraOcF+m985cIFGOHYtiAWD0Ii7hpLw5rX4xTK
XcWQ74TjfDlemJCHeDe60Lx6pZFPVqMm2NbI4DT/PtvrObq5gls7F2G2T30gJ84/
8O1+ZlOg6/P0God8eZPSUT/A3itTNhoxqMphOJpm7KhMA/JC2MxadOlRCUPoC5JN
ZSTt62F9hkd1fYJ2pBfUb2on495yOsRTvXVpGkh4+8LJxBsCAwEAAaNCMEAwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFO1Ee5HNrlH
mneOqWSS/C4DBwDnMA0GCSqGSIb3DQEBCwUAA4IBAQCv6jgy7DRQjitHGTgiqay5
LCe2LtFPgksE1yVb07T7eUYqKZh8qZl8vUZWOynPBTLkCrHjPTvoS34aWtO7CZSV
oiGl6CNTU8l8srUzkgp6svVIVifBGuGX2btoju17dnzNtNIjjdr8wPaXiEYxvDOT
o1YVksVw0fZfw7G0TYfQVpAN0eiZdd6j/7AKNADkpjaAkHp0pPYNDWQO6Fa4VK5L
0ZD+tuoWr9I28izE7cBO0lx5nvMK7W28hZh6E0tGHfkej4rx2N7dMkO3SDbi+kVG
X9tB7+bqt9lO62vqMGFWCeqS0zcmF1l+a0lN532ni7H5UeEGZ+A9R1cnPBni5JgS
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDCjCCAfKgAwIBAgIMFxIyJYjFsQalPe9GMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTA0MDYzOTA5WhcN
MzIwOTAzMDYzOTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5tZ6eB7tCCiTOTh8eOsh
91Uv+2Pvq9TINuiKnFIy0jlRQ+q6M4vU03Gjf/KdNfKlHmYqDrFNeCgyuiv87G74
9oojSlx7NuBt2TXRgw7YetAep5B34BUMu6+PnWtE9zCNi4JSWbZlT66KyaghfpJU
187733VPK5TRnr6zbYWHFVYigau+fm3BpfA5gKqWqaXEC0JeuHptSNnn4K8z1fRN
Ay2PUeEtPV46jazTj+P5SMjueziHBfkXQCkwfeUaXq+ALETMhjKdZlnsWOQqdz5i
c08jpXbWXo0UmFgpu4ohMfHqU34v8Umcyk1q1yTyXnSM1/DPiL/xAHjAXLf2hjIH
yQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQUCoe5yB4CMAyieDVvCN5JFGpNHmUwDQYJKoZIhvcNAQELBQADggEBAKfo
hXZIwMMaQrCCE3RUfvspCsHsPOkX4Tt6EqTRAhh2it3+8t5ECW9upcycc85UDzJR
vJs0AHk4PwYv2AVgr5rVmTlww502dIQz+JiWKTLgjyOD/fpWOchYKZMO/xHsY55O
eKyFngIlvTKcOPvrrVINm6waf54lDH+t4J4fb/8P49HC4JZupFdHWRQiFsYoSMY8
TdNrNbMninl9jua+oUw6Tfib7iOtWZN3C1EIr5bKLHTZwGTjmhq2s4JHoew6V9My
27yq06SiVZflTAv78J3RdCp/HT7UjsncL6U4M5rXvN7Zi6gO4E9BSw2yypvtdiWS
otB/s616SciuS4GfxB8=
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxIyJYikJ+CvzSfFMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkwNDA2MzkwOVoXDTMy
MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKh9PSkmnXpG4UlVPGRbeQ7BZS
EKxKXJTTMIqhCXCrfxPVE9gKRe8Qfq9WIVURFy60Q2ot9df6VUj73MVCm7CQOJ5s
jqJVDRpcNpVANJJCElxAVzelQf0K0oyxeVL8f0bX9zYnxoddR41bBvUPz9lg/01F
GSPk1IwbDJ95I8vQD+WS4aGJ1JW7CSE2Q6VfeOdxYRwzD4yhkit/ixhQNG0tLa1r
CQyIz8/bGT49efyP5zLTRe55hAkwVZmbzGcOFcjfkd6oLb3AiuU1DuitI455wM3L
b9ds59DGyxmPMH0qoyGdK0JScZp4j4jv/wPHjafg/NVq1/v0nRlv+/mojTWHAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBTrP+8tkkcWH5BAUsADSecDgM+rbjANBgkqhkiG9w0BAQsFAAOCAQEAj6qP+x9/
em1MUrypgL+XnvxlBmRktBWEoRz6fldaP7HIN4pNi5OBA0rVdS74BUwkTSoy3AVd
4SSfNdTvqXVEwUfZbopZYcTuemAd5NHo9nycTO5Tse07NuqcxpQ4dTpz3K2iB50h
+GJYKx+W0IHPb/+Pq+ZPXqFcdKFjPGbtZfOuVDffyBaTHCGmkSV/cgG5Zfi3c9Ep
kvK0j8QhcJ5gahqUoum8lDRHJBscUId74qnEXZpwEx0yBk4cPxGdw1M7DnREeVNU
98hAbdeRpgDzXoMR0yNCikTOwk/aU4OhEJUWiaLfDSvMFznG2OdNgP71afsRNRrR
CnTy7QvfVnofyg==
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxIyJYeC/qJ4t7uwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkwNDA2MzkwOVoXDTMy
MDkwMzA2MzkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuXpCZLE//rM01hgLinOg3mLY
3PVsZLCOZgV+KvsuXQEwj/a0E9w6v22KrxJQic4Al8ebFcfxJ4UzB0GSAKazdj3B
Q60WYIx+4/8uLNyEsR49jiCCbNHvjTYsGeC1EiXXN2h6aeJJ/L6y9YxFaArZ13Op
wZhtA+0ubPkaMYKsWdVcipJwNH5PB1v/8JogKshTwMN506XfmkGcydIl+i9yhX4s
NgwkjXgrMNlgvccswSzRn/CPqhqcOgNe0zbonL6pFBju0KC0zqyFODpnpMwrfPMC
HIxLdQpFd2zDV30mSu0/TRILhI4dYa+/gC7ucdzJiHVjE1FXpUDUgT8sIVGpAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBSDAp7bV5g/3hpTUO/Ebaf3pw30yjANBgkqhkiG9w0BAQsFAAOCAQEAc1RU66y/
JBGVeC6Xo86LFDfIYHcd9XA5J07l32NHwMvc7dv+wpG1funG1pi/ucdCh9HXzu+v
tx3QcG/a61YKJiJ2btJWNvkoqKdzkHBbr4kBcOHTWmq8XLUFBq3pVYMy7P/HZiTK
BhRDLwHE5qQO9IxjyqloMlc/WOVVrfieHIHHRg0mvAs0j6DJR1axqnKpgytV/sTy
fwnHV+RNOh8oy33/aeHfgZ0kJejRFmUC3+fTzI1onmaJXD1UHZfMElrHrvCW76eC
T+Zfllo7km3Oyje+2B4W76/q2G8nyT8rFxo9+nB6RGVGslPYLlbF0cFLCCC998HR
5SKrimFkB4A+pg==
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDAjCCAeqgAwIBAgIMFxIyJYfejUTVi0qSMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MDQwNjM5MDlaFw0zMjA5
MDMwNjM5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOUHtUkjT+GYZDQQlIo++9JgKrI+eHjY
WeUH6IREmQYGGJCPkWxWI0DaB1glglMlJU4hTa1BHhnu+Vlzj3vOx6G9EiatRBRa
CEcZiSEnc4Tvr91lQeRSSApZ76CnL/7Tua74sy3YKGgmjlfN5I6gQBVvXs9JYCph
IWakWb5e3+5VrUm4cfH8fLB+7RnGe+uVG5UCE5yQ5Z2KsvYSJWe/NmDpWCn1tKAp
snnmsCHbeEb5OARTEFAXqxRSFRiCyzbDdFMvGKU+SOQfXf3EKeZ5GybfZib9Oe3c
0IkqcImxloZafpnpqGeH+YzAKrG+54LcQQ0nxH0/uO/89mIE1acSTyUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFww
+ykQMnt4PEIJrJezPIlpmYzBMA0GCSqGSIb3DQEBCwUAA4IBAQBzY4BuomR8pAAs
rkDW3pkZaNXi+EZl6FS7k0E9RR2eQrm5BzwWB1zXcuLLXab7yL0HNPwQr9whXTrV
RXaWZTGTSlFbKom9rEL6Lx1w+dnjT+V+irWhevfXh1MEC8S9Hpi5/kWr7Cov+Pf0
3nuTgKc1ZtzkT3+whDVCispuwTVPme6x7x1nR2fMgzW/9kfNe9wx1pD4K1uHmQ1R
WcR1tkAoLK6CPaUmHU5jUh8HFcl1V/vXycKr1R8lzvcv9gDXbgh/3kohZazzeBBW
SfA7verwMTrVGgia/+m57N3F5l3BwGM8rj5ncFynqZPE2GSdVrK4xMnkhVcq/wC+
X0c+UsfH
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxIyJYq5T1ZZnQkPMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTA0MDYzOTA5WhcNMzIwOTAzMDYz
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAzVb+y9zxdfk/fBaMIQjf+9oCJ4vM1pKx7Jl3eL2tce7/
qUdV64hg2q+hiXZI9e9Tji02GrSz+hScYJRSnsOXol6Tz2LiqPvm5+nGmeEe+bCb
Lodg4DUSARleZaWjkSqoCi39tI25HnZP1lLEOtOpiCB2KeHKWV7BHerfFnInyLg9
m1dSVwItLZC5CAZrnXmPnIQu306yFnQvBd/81U5rjYGB6tbma4SOrGpJ8zcx0hv+
ELaeEOINSanuAlK6j2VZsyd9hRz9q2CQbnuT8cNX7ZX5/9GT4WFaLHwUPHpqjthI
8atlenzQ/e6VLe/Sf3asiVnrY5k2cSbofgqAxb20YQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUMlhyDqM5l0Q7FmAn
aWw0znUD4pYwDQYJKoZIhvcNAQELBQADggEBAMeGw2Tb/Q0o/8rE5iFMTdBp6VbH
WFBBCYvmJpL+HkRk7QWGy/8k1Kr5G4gnj9tkavweyq/prl/wA3VnATv+QdM3v4qs
6CWakRCeLMVRiKZZWQsvNZvqooE6YlZIxC2Gj2YW0QzJG3eplSzG1VWFpt3Eh+Jc
ozBcvmnAIQCC2YtX0DVqHFTG2qS4EhVK33H296XIXfSNzD0Rf5O5WQUuzYC7w8cZ
yOEnbtwNH9yTWndZtvO4n2Tl/qKVAIxc347slAHagLKIAQbEhMbqgJ1csPjcHt/J
5Frlzt1HtlviJjFsY+X+7pc7CT1PTHCPGOv/DOsAtiHXfQyzLozV9Drtx/o=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "7140152701265059592804081616"
etcd-clients-ca: "7140152701334538361835018685"
etcd-manager-ca-events: "7140152701353152116999188294"
etcd-manager-ca-main: "7140152701343712646643132357"
etcd-peers-ca-events: "7140152701262321031184890800"
etcd-peers-ca-main: "7140152701288092082058775186"
kubernetes-ca: "7140152701493782195543542031"
service-account: "7140152701518733293461068249"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/instancegroup: master-ap-northeast-2c
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
etcdManifests:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
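Each Assets entry pins a SHA-256 digest in sha256@url[,mirror] form, so a binary can be verified independently of which mirror served it. A minimal sketch for the amd64 kubelet, using the digest pinned above (scratch file names are illustrative):
# Fetch the artifact, then check it against the pinned digest;
# sha256sum -c expects two spaces between the hash and the file name.
curl -fLo kubelet https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
echo "c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b  kubelet" | sha256sum -c -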

File diff suppressed because it is too large


@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}


@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
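The two identical documents above are EC2 trust policies: they let EC2 instances assume the role, which is how the master and node permissions below reach the instances via instance profiles. A minimal sketch of the equivalent manual wiring with the AWS CLI (role, profile, and file names are illustrative, not taken from this repo):
# Create a role trusted by EC2, then expose it through an instance profile.
aws iam create-role --role-name example-node-role \
  --assume-role-policy-document file://trust.json
aws iam create-instance-profile --instance-profile-name example-node-profile
aws iam add-role-to-instance-profile --instance-profile-name example-node-profile \
  --role-name example-node-role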


@@ -0,0 +1,273 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::hostedzone/Z072735718G25WNVKU834"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io",
"ec2:CreateAction": [
"CreateSecurityGroup"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:security-group/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:security-group/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io",
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ModifyVolume",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:RegisterTargets",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "ec2:CreateSecurityGroup",
"Effect": "Allow",
"Resource": "arn:aws:ec2:*:*:vpc/*"
}
],
"Version": "2012-10-17"
}
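A policy document like the one above can be sanity-checked without deploying it by running it through IAM's policy simulator. A minimal sketch, assuming the JSON is saved as master-policy.json (name illustrative); note that tag-based conditions such as aws:ResourceTag evaluate as an implicit deny unless matching context entries are supplied:
aws iam simulate-custom-policy \
  --policy-input-list file://master-policy.json \
  --action-names route53:ListHostedZones s3:ListBucket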


@@ -0,0 +1,50 @@
{
"Statement": [
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/addons/*",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/cluster-completed.spec",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/igconfig/node/*",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/secrets/dockerconfig"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:ModifyNetworkInterfaceAttribute",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}
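The node policy above stays read-only: S3 gets on the kops state store plus EC2/ECR describe and pull permissions. A minimal sketch of attaching it as an inline policy to an existing role (role and policy names are illustrative):
aws iam put-role-policy --role-name example-node-role \
  --policy-name node-permissions \
  --policy-document file://node-policy.json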


@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCyfTPnCyr0Typ7yGTcy0LEGa8IH8yESEXa4Qyr85dWrxazTnWO7iYS0Ze6L0GMMO5qZXg/ntJGhI4PYF/WbCZ5KZMRXePyQIVs5pKMvSX4yH2gPIET5c6yTg4ZSIqrZDLBXGEZxMVp/SnNx1tRzxi0plBDtguSy6LZD0C1ue+VeT4oO98EB2T01GOeQp+RlF/theZuEWSWOVfFD0qVdsHIwVlYYlEZR11IrTamabMOVzyw+/8cokA4hgsrrkSrpKQ2YW0evHK1pxZrw+i3YJuHh3hJ0h98Ymw3rpHGec59gXaYT0PQEQvZs9RCrYw8NpCTQrImXR1UVjeeY3KGgpYQXna+WAmkjA+K/JvLmHGeombVJyd3v8330FX+Ob9klgqTWFvwb8Ew4QCcfl5hDAWxvzoJKAoG/TAZd13aNYaZAVkeWB7vPFWZ0brea6sqUJzXqzPwUXa0OirnqEfxMLZoo4tFyfxuVYVK+ScxayBPYJQkhwmTAZ4bj0OfQEw/jJM= hsgahm@ws-ubuntu
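A minimal sketch for identifying this key before distributing it, assuming the line above is saved as admin.pub (name illustrative):
ssh-keygen -lf admin.pub          # bit length, SHA256 fingerprint, comment
ssh-keygen -lf -E md5 admin.pub   # MD5 form, the style EC2 shows for imported keys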


@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-a
InstanceGroupRole: Node
NodeupConfigHash: jyt+itIoHkfChG5oykaR/YcW2X+YK02YqH7IwlOP474=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-b
InstanceGroupRole: Node
NodeupConfigHash: F10MZ5YMtLK1UChahPw/MwMFfjLrY81DKA4nft2Tobk=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-c
InstanceGroupRole: Node
NodeupConfigHash: fEdAb1pHGvBokNYyHZ4CzDj3eq1vsZxS5FrjEUayRuU=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-mgmt-a
InstanceGroupRole: Node
NodeupConfigHash: oZQY/P4yvbXnh4dW93Et8YpN0q6liFWsIMAyny6862g=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-mgmt-b
InstanceGroupRole: Node
NodeupConfigHash: oc7Bss3+h8wRUqWSY05NxslVT4WbcTxzvi5KtLp7vuw=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-a
InstanceGroupRole: Node
NodeupConfigHash: YzHBVETSqynzG1++32lK6kNelMH04Gx2UDgb7bJWVm8=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
  "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
  "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
  echo "Attempting download with: ${cmd} ${url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
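# Fallback order above: curl and wget are each tried with transparent
# compression first, then without, for every mirror URL; a download only
# counts once its SHA256 matches, and the loop sleeps 60s between rounds.
# Usage sketch (hypothetical file and mirrors, same three-arg signature):
#   download-or-bust cni.tgz "<expected-sha256>" "https://mirror-a/cni.tgz,https://mirror-b/cni.tgz"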
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo "$1" | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-b
InstanceGroupRole: Node
NodeupConfigHash: RcLvuahs6C2C746ouG575y7zIBPE/45aLDopp3qLKak=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
  "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
  "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
  echo "Attempting download with: ${cmd} ${url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo "$1" | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-c
InstanceGroupRole: Node
NodeupConfigHash: GZFMJ+HtfNFNr+OV9OCtF2wJLZDODBwV/NFLgSCHB2I=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
  "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
  "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
  echo "Attempting download with: ${cmd} ${url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo "$1" | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
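  # registerSchedulable: false keeps the control-plane kubelet out of the
  # default scheduling pool; workloads land on the Node instance groups.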
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2a
InstanceGroupRole: Master
NodeupConfigHash: bFvgCW9ijGRs5u8kNAX/s53tD3afsvYDdJVNW1Kq5OY=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
  "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
  "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
  echo "Attempting download with: ${cmd} ${url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo "$1" | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2b
InstanceGroupRole: Master
NodeupConfigHash: 12BbVAVTnRcOLqha45NC0eii/lUhVtoQrIYpccKF/lQ=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
  "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
  "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
  "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
  echo "Attempting download with: ${cmd} ${url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo "$1" | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2c
InstanceGroupRole: Master
NodeupConfigHash: 6HuG0yYyZf5DLo50saQaB9ApKbrna49ygtHGjkyb/l4=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,251 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2022-09-13T04:27:37Z"
name: dev.datasaker.io
spec:
api:
loadBalancer:
class: Classic
type: Public
authorization:
rbac: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
configStore: s3://clusters.dev.datasaker.io/dev.datasaker.io
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
dnsZone: Z072735718G25WNVKU834
docker:
skipInstall: true
etcdClusters:
- backups:
backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main
cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: main
version: 3.5.4
- backups:
backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events
cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: events
version: 3.5.4
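  # Two etcd clusters, each with one encrypted-volume member per control-plane
  # AZ: "main" stores all API state, while the high-churn Events API is split
  # into "events" so its write volume cannot crowd out the main store.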
externalDns:
provider: dns-controller
iam:
allowContainerRegistry: true
legacy: false
keyStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
image: registry.k8s.io/dns/k8s-dns-node-cache:1.21.3
memoryRequest: 5Mi
provider: CoreDNS
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: 1.23.10
masterInternalName: api.internal.dev.datasaker.io
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterPublicName: api.dev.datasaker.io
networkCIDR: 172.21.0.0/16
networkID: vpc-0b6e0b906c678a22f
networking:
calico:
encapsulationMode: ipip
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/secrets
serviceClusterIPRange: 100.64.0.0/13
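  # Address plan, from the values above: pods draw from 100.96.0.0/11 and
  # services from 100.64.0.0/13 (clusterDNS 100.64.0.10 sits inside it), both
  # within the 100.64.0.0/10 nonMasqueradeCIDR, while the nodes themselves
  # live in the 172.21.0.0/16 VPC.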
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.21.8.0/23
id: subnet-0c875e254456809f7
name: ap-northeast-2a
type: Private
zone: ap-northeast-2a
- cidr: 172.21.10.0/23
id: subnet-05672a669943fc12f
name: ap-northeast-2b
type: Private
zone: ap-northeast-2b
- cidr: 172.21.12.0/23
id: subnet-0940fd78504acbbde
name: ap-northeast-2c
type: Private
zone: ap-northeast-2c
- cidr: 172.21.0.0/24
id: subnet-0de55619bee2411f8
name: utility-ap-northeast-2a
type: Utility
zone: ap-northeast-2a
- cidr: 172.21.1.0/24
id: subnet-0a5d787353f874684
name: utility-ap-northeast-2b
type: Utility
zone: ap-northeast-2b
- cidr: 172.21.2.0/24
id: subnet-0ee26ffc561efb292
name: utility-ap-northeast-2c
type: Utility
zone: ap-northeast-2c
topology:
dns:
type: Public
masters: private
nodes: private

View File

@@ -0,0 +1,792 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-attacher-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- csi.storage.k8s.io
resources:
- csinodeinfos
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-provisioner-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- create
- delete
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshots
verbs:
- get
- list
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- get
- list
- apiGroups:
- storage.k8s.io
resources:
- csinodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- watch
- list
- delete
- update
- create
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-resizer-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-snapshotter-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotclasses
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- create
- get
- list
- watch
- update
- delete
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-attacher-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-attacher-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-provisioner-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-provisioner-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-resizer-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-resizer-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-snapshotter-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-snapshotter-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-getter-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-csi-node-role
subjects:
- kind: ServiceAccount
name: ebs-csi-node-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-sa
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: ebs-csi-node
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- node
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --v=2
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
name: ebs-plugin
ports:
- containerPort: 9808
name: healthz
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: kubelet-dir
- mountPath: /csi
name: plugin-dir
- mountPath: /dev
name: device-dir
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock
name: node-driver-registrar
volumeMounts:
- mountPath: /csi
name: plugin-dir
- mountPath: /registration
name: registration-dir
- args:
- --csi-address=/csi/csi.sock
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02
imagePullPolicy: IfNotPresent
name: liveness-probe
volumeMounts:
- mountPath: /csi
name: plugin-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
serviceAccountName: ebs-csi-node-sa
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet
type: Directory
name: kubelet-dir
- hostPath:
path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
type: DirectoryOrCreate
name: plugin-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
name: registration-dir
- hostPath:
path: /dev
type: Directory
name: device-dir
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
kops.k8s.io/managed-by: kops
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
containers:
- args:
- controller
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --k8s-tag-cluster-id=dev.datasaker.io
- --extra-tags=KubernetesCluster=dev.datasaker.io
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: key_id
name: aws-secret
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: access_key
name: aws-secret
optional: true
image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
name: ebs-plugin
ports:
- containerPort: 9808
name: healthz
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
- --feature-gates=Topology=true
- --extra-create-metadata
- --leader-election=true
- --default-fstype=ext4
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119
imagePullPolicy: IfNotPresent
name: csi-provisioner
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
- --leader-election=true
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b
imagePullPolicy: IfNotPresent
name: csi-attacher
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4
imagePullPolicy: IfNotPresent
name: csi-resizer
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=/csi/csi.sock
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02
imagePullPolicy: IfNotPresent
name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccountName: ebs-csi-controller-sa
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: socket-dir
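      # socket-dir is an emptyDir shared by all containers in the pod: the
      # ebs-plugin serves CSI on csi.sock there, and the csi-provisioner,
      # csi-attacher, csi-resizer, and liveness-probe sidecars dial that
      # same Unix socket.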
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs.csi.aws.com
spec:
attachRequired: true
podInfoOnMount: false
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver

View File

@@ -0,0 +1,69 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: 530752f323a7573cedaa993ac169181c2d36d70e1cb4950d3c1a3347ac586826
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 1060dbbcbf4f9768081b838e619da1fc3970ef2b86886f8e5c6ff3e2842c2aa3
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
version: 9.99.0
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
version: 9.99.0
- id: k8s-1.23
manifest: leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml
manifestHash: b9c91e09c0f28c9b74ff140b8395d611834c627d698846d625c10975a74a48c4
name: leader-migration.rbac.addons.k8s.io
selector:
k8s-addon: leader-migration.rbac.addons.k8s.io
version: 9.99.0
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 3e67c5934d55a5f5ebbd8a97e428aa6d9749812ba209a3dc1f1cb9449ee75c26
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 9.99.0
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.22
manifest: networking.projectcalico.org/k8s-1.22.yaml
manifestHash: 94e23c0a435bb93ebb2271d4352bd25a98b8d84064a40a1ff2077111cfe6dc44
name: networking.projectcalico.org
selector:
role.kubernetes.io/networking: "1"
version: 9.99.0
- id: k8s-1.17
manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml
manifestHash: 80c38e6bb751e5c9e58a013b9c09b70d0ca34383d15889e09df214090c52713c
name: aws-ebs-csi-driver.addons.k8s.io
selector:
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
version: 9.99.0
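  # Note: kops' channels tool decides whether to re-apply an addon from its
  # manifestHash; the uniform "9.99.0" entries are placeholder versions.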

View File

@@ -0,0 +1,385 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.8.6@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- configMap:
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
maxUnavailable: 50%
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: coredns-autoscaler
kops.k8s.io/managed-by: kops
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
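
The --default-params flag above configures the cluster-proportional-autoscaler's linear mode, which sizes the coredns Deployment from cluster capacity. A small sketch of that rule using the values from the manifest (coresPerReplica=256, nodesPerReplica=16, preventSinglePointFailure=true); the real controller reads live core and node counts from the API server:

import math

def linear_replicas(cores: int, nodes: int,
                    cores_per_replica: int = 256,
                    nodes_per_replica: int = 16,
                    prevent_spof: bool = True) -> int:
    # replicas = max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica))
    replicas = max(math.ceil(cores / cores_per_replica),
                   math.ceil(nodes / nodes_per_replica))
    # preventSinglePointFailure keeps at least 2 replicas on multi-node clusters.
    if prevent_spof and nodes > 1:
        replicas = max(replicas, 2)
    return max(replicas, 1)

print(linear_replicas(cores=40, nodes=10))  # -> 2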


@@ -0,0 +1,140 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.24.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
kops.k8s.io/managed-by: kops
version: v1.24.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z072735718G25WNVKU834
- --internal-ipv4
- --zone=*/*
- -v=2
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/dns-controller:1.24.1@sha256:d0bff3dff30ec695702eb954b7568e3b5aa164f458a70be1d3f5194423ef90a6
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller


@@ -0,0 +1,225 @@
apiVersion: v1
data:
config.yaml: |
{"cloud":"aws","configBase":"s3://clusters.dev.datasaker.io/dev.datasaker.io","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.dev.datasaker.io"],"Region":"ap-northeast-2"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.24.1
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.dev.datasaker.io
creationTimestamp: null
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
kops.k8s.io/managed-by: kops
version: v1.24.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
containers:
- args:
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/kops-controller:1.24.1@sha256:dec29a983e633e2d3321fef86e6fea211784b2dc9b62ce735d708e781ef4919c
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
runAsUser: 10011
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: kops-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller


@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api


@@ -0,0 +1,52 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: leader-migration.rbac.addons.k8s.io
name: system::leader-locking-migration
namespace: kube-system
rules:
- apiGroups:
- coordination.k8s.io
resourceNames:
- cloud-provider-extraction-migration
resources:
- leases
verbs:
- create
- list
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: leader-migration.rbac.addons.k8s.io
name: system::leader-locking-migration
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system::leader-locking-migration
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:kube-controller-manager
- kind: ServiceAccount
name: kube-controller-manager
namespace: kube-system
- kind: ServiceAccount
name: aws-cloud-controller-manager
namespace: kube-system
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system


@@ -0,0 +1,15 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container


@@ -0,0 +1,118 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-csi-1-21
parameters:
encrypted: "true"
type: gp3
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system


@@ -0,0 +1,4 @@
{
"memberCount": 3,
"etcdVersion": "3.5.4"
}


@@ -0,0 +1,4 @@
{
"memberCount": 3,
"etcdVersion": "3.5.4"
}


@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 --peer-urls=https://__name__:2381
--quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events
--volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142
name: etcd-manager
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}


@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380
--quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}


@@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.24.1@sha256:b969a40a66d7c9781b8f393c4bd1cc90828c45b0419e24bf2192be9a10fd6c44
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
securityContext:
runAsNonRoot: true
runAsUser: 10012
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
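
Each entry in the Assets list above is a pinned download in <sha256>@<url> form; nodeup verifies the digest after fetching. A minimal stand-alone check of one entry (network access to the listed URL assumed):

import hashlib
import urllib.request

def verify_asset(entry: str) -> bool:
    # Split "<sha256>@<url>", download the artifact, and compare digests.
    digest, url = entry.split("@", 1)
    data = urllib.request.urlopen(url).read()
    return hashlib.sha256(data).hexdigest() == digest

kubelet = ("c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b"
           "@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet")
print(verify_asset(kubelet))  # True if the download matches the pin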


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-c
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: mgmt
kops.k8s.io/instancegroup: dev-mgmt-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: mgmt
kops.k8s.io/instancegroup: dev-mgmt-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6


@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-c
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

Some files were not shown because too many files have changed in this diff