This commit is contained in:
exhsgahm
2022-09-16 12:25:27 +09:00
parent f068be9215
commit 8c9fa0bcb3
50 changed files with 13524 additions and 2589 deletions

View File

@@ -16,13 +16,14 @@ resource "aws_route" "route-private-rt-datasaker-dev-0-0-0-0--0" {
resource "aws_subnet" "sbn-dev-a" { resource "aws_subnet" "sbn-dev-a" {
availability_zone = "ap-northeast-2a" availability_zone = "ap-northeast-2a"
cidr_block = "172.21.1.0/24" cidr_block = "172.21.8.0/23"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
"Name" = "sbn-dev-a.datasaker" "Name" = "sbn-dev-a.datasaker"
"SubnetType" = "Private" "SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned" "kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1" "kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1" "kubernetes.io/role/internal-elb" = "1"
} }
@@ -31,13 +32,14 @@ resource "aws_subnet" "sbn-dev-a" {
resource "aws_subnet" "sbn-dev-b" { resource "aws_subnet" "sbn-dev-b" {
availability_zone = "ap-northeast-2b" availability_zone = "ap-northeast-2b"
cidr_block = "172.21.2.0/24" cidr_block = "172.21.10.0/23"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
"Name" = "sbn-dev-b.datasaker" "Name" = "sbn-dev-b.datasaker"
"SubnetType" = "Private" "SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned" "kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1" "kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1" "kubernetes.io/role/internal-elb" = "1"
} }
@@ -46,13 +48,14 @@ resource "aws_subnet" "sbn-dev-b" {
resource "aws_subnet" "sbn-dev-c" { resource "aws_subnet" "sbn-dev-c" {
availability_zone = "ap-northeast-2c" availability_zone = "ap-northeast-2c"
cidr_block = "172.21.3.0/24" cidr_block = "172.21.12.0/23"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
"Name" = "sbn-dev-c.datasaker" "Name" = "sbn-dev-c.datasaker"
"SubnetType" = "Private" "SubnetType" = "Private"
"kubernetes.io/cluster/datasaker" = "owned" "kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1" "kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1" "kubernetes.io/role/internal-elb" = "1"
} }

View File

@@ -15,13 +15,14 @@ output "sbn_dmz_c_id" {
resource "aws_subnet" "sbn-dmz-a" { resource "aws_subnet" "sbn-dmz-a" {
availability_zone = "ap-northeast-2a" availability_zone = "ap-northeast-2a"
cidr_block = "172.21.0.0/28" cidr_block = "172.21.0.0/24"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
"Name" = "sbn-dmz-a.datasaker" "Name" = "sbn-dmz-a.datasaker"
"SubnetType" = "Public" "SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned" "kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1" "kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1" "kubernetes.io/role/internal-elb" = "1"
} }
@@ -30,13 +31,14 @@ resource "aws_subnet" "sbn-dmz-a" {
resource "aws_subnet" "sbn-dmz-b" { resource "aws_subnet" "sbn-dmz-b" {
availability_zone = "ap-northeast-2b" availability_zone = "ap-northeast-2b"
cidr_block = "172.21.0.16/28" cidr_block = "172.21.1.0/24"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
"Name" = "sbn-dmz-b.datasaker" "Name" = "sbn-dmz-b.datasaker"
"SubnetType" = "Public" "SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned" "kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1" "kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1" "kubernetes.io/role/internal-elb" = "1"
} }
@@ -45,13 +47,14 @@ resource "aws_subnet" "sbn-dmz-b" {
resource "aws_subnet" "sbn-dmz-c" { resource "aws_subnet" "sbn-dmz-c" {
availability_zone = "ap-northeast-2c" availability_zone = "ap-northeast-2c"
cidr_block = "172.21.0.32/28" cidr_block = "172.21.2.0/24"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
"Name" = "sbn-dmz-c.datasaker" "Name" = "sbn-dmz-c.datasaker"
"SubnetType" = "Public" "SubnetType" = "Utility"
"kubernetes.io/cluster/datasaker" = "owned" "kubernetes.io/cluster/datasaker" = "owned"
"kubernetes.io/cluster/dev.datasaker.io" = "shared"
"kubernetes.io/role/elb" = "1" "kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1" "kubernetes.io/role/internal-elb" = "1"
} }

View File

@@ -15,7 +15,7 @@ resource "aws_route" "route-private-rt-datasaker-iac-0-0-0-0--0" {
resource "aws_subnet" "sbn-iac-a" { resource "aws_subnet" "sbn-iac-a" {
availability_zone = "ap-northeast-2a" availability_zone = "ap-northeast-2a"
cidr_block = "172.21.4.0/24" cidr_block = "172.21.16.0/23"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
@@ -30,7 +30,7 @@ resource "aws_subnet" "sbn-iac-a" {
resource "aws_subnet" "sbn-iac-b" { resource "aws_subnet" "sbn-iac-b" {
availability_zone = "ap-northeast-2b" availability_zone = "ap-northeast-2b"
cidr_block = "172.21.5.0/24" cidr_block = "172.21.18.0/23"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {
@@ -45,7 +45,7 @@ resource "aws_subnet" "sbn-iac-b" {
resource "aws_subnet" "sbn-iac-c" { resource "aws_subnet" "sbn-iac-c" {
availability_zone = "ap-northeast-2c" availability_zone = "ap-northeast-2c"
cidr_block = "172.21.6.0/24" cidr_block = "172.21.20.0/23"
enable_resource_name_dns_a_record_on_launch = true enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name" private_dns_hostname_type_on_launch = "resource-name"
tags = { tags = {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,273 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::hostedzone/Z072735718G25WNVKU834"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io",
"ec2:CreateAction": [
"CreateSecurityGroup"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:security-group/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:security-group/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io",
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ModifyVolume",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:RegisterTargets",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "dev.datasaker.io"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "ec2:CreateSecurityGroup",
"Effect": "Allow",
"Resource": "arn:aws:ec2:*:*:vpc/*"
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1,50 @@
{
"Statement": [
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/addons/*",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/cluster-completed.spec",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/igconfig/node/*",
"arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/secrets/dockerconfig"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::clusters.dev.datasaker.io"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:ModifyNetworkInterfaceAttribute",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCyfTPnCyr0Typ7yGTcy0LEGa8IH8yESEXa4Qyr85dWrxazTnWO7iYS0Ze6L0GMMO5qZXg/ntJGhI4PYF/WbCZ5KZMRXePyQIVs5pKMvSX4yH2gPIET5c6yTg4ZSIqrZDLBXGEZxMVp/SnNx1tRzxi0plBDtguSy6LZD0C1ue+VeT4oO98EB2T01GOeQp+RlF/theZuEWSWOVfFD0qVdsHIwVlYYlEZR11IrTamabMOVzyw+/8cokA4hgsrrkSrpKQ2YW0evHK1pxZrw+i3YJuHh3hJ0h98Ymw3rpHGec59gXaYT0PQEQvZs9RCrYw8NpCTQrImXR1UVjeeY3KGgpYQXna+WAmkjA+K/JvLmHGeombVJyd3v8330FX+Ob9klgqTWFvwb8Ew4QCcfl5hDAWxvzoJKAoG/TAZd13aNYaZAVkeWB7vPFWZ0brea6sqUJzXqzPwUXa0OirnqEfxMLZoo4tFyfxuVYVK+ScxayBPYJQkhwmTAZ4bj0OfQEw/jJM= hsgahm@ws-ubuntu

View File

@@ -0,0 +1,175 @@
#!/bin/bash
# kops nodeup bootstrap script (EC2 user-data) for instance group "dev-data-a":
# downloads the nodeup binary, verifies its checksum, writes its config files,
# and hands control to nodeup.
set -o errexit
set -o nounset
set -o pipefail
# Mirror URLs (comma-separated) and SHA-256 digests for the nodeup binary,
# one pair per supported CPU architecture.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Best-effort kernel network buffer tuning; "|| true" keeps errexit from
# aborting on images that reject these sysctls.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Choose the nodeup install directory, create its bin/ and conf/ subdirs,
# and leave the shell cd'ed into it (callers rely on the cwd change).
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
# Downloads $1 from one of the comma-separated URLs in $3, looping forever
# (sleeping 60s between full passes) until the file's SHA-256 matches $2.
# Tries several curl/wget variants in turn. Returns 0 once a verified copy
# of ${file} exists in the current directory.
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse a previously downloaded file when its hash already checks out.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  while true; do
    for url in "${urls[@]}"; do
      # Candidate downloader invocations, preferred first; each array entry
      # is a single string that is word-split when executed below.
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # Fix: the original message was missing the "$" on the url expansion,
        # so it logged the literal text "{url}" instead of the URL.
        echo "Attempting download with: ${cmd} ${url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Verify that file $1 has the SHA-256 digest $2.
# Prints a corruption diagnostic and returns 1 when the digests differ;
# returns 0 on a match.
validate-hash() {
  local -r path="$1"
  local -r want="$2"
  local got
  got=$(sha256sum ${path} | awk '{ print $1 }') || true
  if [[ "${got}" == "${want}" ]]; then
    return 0
  fi
  echo "== ${path} corrupted, hash ${got} doesn't match expected ${want} =="
  return 1
}
# Print each comma-separated field of $1 on its own line.
function split-commas() {
  local csv=$1
  echo ${csv} | tr ',' '\n'
}
# Resolve the nodeup URL/hash pair for this host's CPU architecture, download
# and verify the binary into ${INSTALL_DIR}/bin, then launch nodeup with the
# kube_env.yaml config. Exits 1 on an unsupported architecture.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
# Main: ensure a machine-id exists, write the nodeup config files, then fetch
# and launch nodeup.
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Heredoc bodies below are written verbatim to disk and consumed by nodeup —
# do not add comments inside them.
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment handed to nodeup via --conf.
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-a
InstanceGroupRole: Node
NodeupConfigHash: jyt+itIoHkfChG5oykaR/YcW2X+YK02YqH7IwlOP474=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
# kops nodeup bootstrap script (EC2 user-data) for instance group "dev-data-b".
# Identical to the sibling per-instance-group scripts in this commit except
# for the InstanceGroupName and NodeupConfigHash written to conf/kube_env.yaml.
set -o errexit
set -o nounset
set -o pipefail
# Mirror URLs (comma-separated) and SHA-256 digests for the nodeup binary.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Best-effort network buffer tuning; "|| true" keeps errexit from aborting.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Choose the nodeup install directory, create bin/ and conf/, and cd into it.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse an existing file when its hash already validates.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  while true; do
    for url in "${urls[@]}"; do
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # NOTE(review): "{url}" is missing a "$", so the literal text "{url}"
        # is logged instead of the URL; harmless but worth fixing.
        echo "Attempting download with: ${cmd} {url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Return 0 iff file $1 has SHA-256 digest $2; log and return 1 otherwise.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual
  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Print each comma-separated element of $1 on its own line.
function split-commas() {
  echo $1 | tr "," "\n"
}
# Resolve the per-arch nodeup URL/hash, download and verify it, then run it.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
# Main: set machine-id, write nodeup config, then fetch and launch nodeup.
# Heredoc bodies are written verbatim to disk — do not add comments inside.
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-b
InstanceGroupRole: Node
NodeupConfigHash: F10MZ5YMtLK1UChahPw/MwMFfjLrY81DKA4nft2Tobk=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
# kops nodeup bootstrap script (EC2 user-data) for instance group "dev-data-c".
# Identical to the sibling per-instance-group scripts in this commit except
# for the InstanceGroupName and NodeupConfigHash written to conf/kube_env.yaml.
set -o errexit
set -o nounset
set -o pipefail
# Mirror URLs (comma-separated) and SHA-256 digests for the nodeup binary.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Best-effort network buffer tuning; "|| true" keeps errexit from aborting.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Choose the nodeup install directory, create bin/ and conf/, and cd into it.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse an existing file when its hash already validates.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  while true; do
    for url in "${urls[@]}"; do
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # NOTE(review): "{url}" is missing a "$", so the literal text "{url}"
        # is logged instead of the URL; harmless but worth fixing.
        echo "Attempting download with: ${cmd} {url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Return 0 iff file $1 has SHA-256 digest $2; log and return 1 otherwise.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual
  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Print each comma-separated element of $1 on its own line.
function split-commas() {
  echo $1 | tr "," "\n"
}
# Resolve the per-arch nodeup URL/hash, download and verify it, then run it.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
# Main: set machine-id, write nodeup config, then fetch and launch nodeup.
# Heredoc bodies are written verbatim to disk — do not add comments inside.
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-data-c
InstanceGroupRole: Node
NodeupConfigHash: fEdAb1pHGvBokNYyHZ4CzDj3eq1vsZxS5FrjEUayRuU=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
# Select the nodeup artifact matching the host architecture, download it into
# ${INSTALL_DIR}/bin with hash verification, then run it to configure this node.
# Exits 1 on an unrecognized architecture.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
# Main: ensure a machine-id exists (best-effort), stage config files, run nodeup.
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Quoted heredoc delimiter: the YAML below is written to disk verbatim
# (no parameter expansion).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment consumed by nodeup (generated by kops).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-mgmt-a
InstanceGroupRole: Node
NodeupConfigHash: oZQY/P4yvbXnh4dW93Et8YpN0q6liFWsIMAyny6862g=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
# kops nodeup bootstrap (instance userdata, generated): downloads the nodeup
# binary for the host architecture, verifies its SHA256, writes the cluster
# spec and instance-group config, and launches nodeup to configure this node.
set -o errexit
set -o nounset
set -o pipefail
# Comma-separated mirror lists for the nodeup binary, plus expected digests.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Raise socket-buffer limits; best-effort so a denied sysctl cannot abort boot.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Pick a writable+executable install dir, create bin/ and conf/, and cd there.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse a previously downloaded file when its checksum still matches.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  # Cycle over every mirror with every available fetch tool until one succeeds.
  while true; do
    for url in "${urls[@]}"; do
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # Fixed: was '{url}' (missing '$'), which printed a literal brace.
        echo "Attempting download with: ${cmd} ${url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Verify a file against an expected SHA256 digest; returns 1 on mismatch.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual
  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Print each comma-separated element of $1 on its own line.
function split-commas() {
  echo $1 | tr "," "\n"
}
# Select the arch-appropriate nodeup artifact, download and verify it, run it.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Quoted heredoc delimiter: the YAML below is written verbatim (no expansion).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment consumed by nodeup (generated by kops).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-mgmt-b
InstanceGroupRole: Node
NodeupConfigHash: oc7Bss3+h8wRUqWSY05NxslVT4WbcTxzvi5KtLp7vuw=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
# kops nodeup bootstrap (instance userdata, generated): downloads the nodeup
# binary for the host architecture, verifies its SHA256, writes the cluster
# spec and instance-group config, and launches nodeup to configure this node.
set -o errexit
set -o nounset
set -o pipefail
# Comma-separated mirror lists for the nodeup binary, plus expected digests.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Raise socket-buffer limits; best-effort so a denied sysctl cannot abort boot.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Pick a writable+executable install dir, create bin/ and conf/, and cd there.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse a previously downloaded file when its checksum still matches.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  # Cycle over every mirror with every available fetch tool until one succeeds.
  while true; do
    for url in "${urls[@]}"; do
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # Fixed: was '{url}' (missing '$'), which printed a literal brace.
        echo "Attempting download with: ${cmd} ${url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Verify a file against an expected SHA256 digest; returns 1 on mismatch.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual
  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Print each comma-separated element of $1 on its own line.
function split-commas() {
  echo $1 | tr "," "\n"
}
# Select the arch-appropriate nodeup artifact, download and verify it, run it.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Quoted heredoc delimiter: the YAML below is written verbatim (no expansion).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment consumed by nodeup (generated by kops).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-a
InstanceGroupRole: Node
NodeupConfigHash: YzHBVETSqynzG1++32lK6kNelMH04Gx2UDgb7bJWVm8=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
# kops nodeup bootstrap (instance userdata, generated): downloads the nodeup
# binary for the host architecture, verifies its SHA256, writes the cluster
# spec and instance-group config, and launches nodeup to configure this node.
set -o errexit
set -o nounset
set -o pipefail
# Comma-separated mirror lists for the nodeup binary, plus expected digests.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Raise socket-buffer limits; best-effort so a denied sysctl cannot abort boot.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Pick a writable+executable install dir, create bin/ and conf/, and cd there.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse a previously downloaded file when its checksum still matches.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  # Cycle over every mirror with every available fetch tool until one succeeds.
  while true; do
    for url in "${urls[@]}"; do
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # Fixed: was '{url}' (missing '$'), which printed a literal brace.
        echo "Attempting download with: ${cmd} ${url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Verify a file against an expected SHA256 digest; returns 1 on mismatch.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual
  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Print each comma-separated element of $1 on its own line.
function split-commas() {
  echo $1 | tr "," "\n"
}
# Select the arch-appropriate nodeup artifact, download and verify it, run it.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Quoted heredoc delimiter: the YAML below is written verbatim (no expansion).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment consumed by nodeup (generated by kops).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-b
InstanceGroupRole: Node
NodeupConfigHash: RcLvuahs6C2C746ouG575y7zIBPE/45aLDopp3qLKak=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,175 @@
#!/bin/bash
# kops nodeup bootstrap (instance userdata, generated): downloads the nodeup
# binary for the host architecture, verifies its SHA256, writes the cluster
# spec and instance-group config, and launches nodeup to configure this node.
set -o errexit
set -o nounset
set -o pipefail
# Comma-separated mirror lists for the nodeup binary, plus expected digests.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Raise socket-buffer limits; best-effort so a denied sysctl cannot abort boot.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Pick a writable+executable install dir, create bin/ and conf/, and cd there.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse a previously downloaded file when its checksum still matches.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  # Cycle over every mirror with every available fetch tool until one succeeds.
  while true; do
    for url in "${urls[@]}"; do
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # Fixed: was '{url}' (missing '$'), which printed a literal brace.
        echo "Attempting download with: ${cmd} ${url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Verify a file against an expected SHA256 digest; returns 1 on mismatch.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual
  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Print each comma-separated element of $1 on its own line.
function split-commas() {
  echo $1 | tr "," "\n"
}
# Select the arch-appropriate nodeup artifact, download and verify it, run it.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Quoted heredoc delimiter: the YAML below is written verbatim (no expansion).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment consumed by nodeup (generated by kops).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: dev-process-c
InstanceGroupRole: Node
NodeupConfigHash: GZFMJ+HtfNFNr+OV9OCtF2wJLZDODBwV/NFLgSCHB2I=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
# kops nodeup bootstrap (control-plane userdata, generated): downloads the
# nodeup binary for the host architecture, verifies its SHA256, writes the
# control-plane cluster spec and instance-group config, and launches nodeup.
set -o errexit
set -o nounset
set -o pipefail
# Comma-separated mirror lists for the nodeup binary, plus expected digests.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Raise socket-buffer limits; best-effort so a denied sysctl cannot abort boot.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Pick a writable+executable install dir, create bin/ and conf/, and cd there.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p ${INSTALL_DIR}/bin
  mkdir -p ${INSTALL_DIR}/conf
  cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  local -r urls=( $(split-commas "$3") )
  # Reuse a previously downloaded file when its checksum still matches.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi
  # Cycle over every mirror with every available fetch tool until one succeeds.
  while true; do
    for url in "${urls[@]}"; do
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # Fixed: was '{url}' (missing '$'), which printed a literal brace.
        echo "Attempting download with: ${cmd} ${url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done
    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Verify a file against an expected SHA256 digest; returns 1 on mismatch.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual
  actual=$(sha256sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Print each comma-separated element of $1 on its own line.
function split-commas() {
  echo $1 | tr "," "\n"
}
# Select the arch-appropriate nodeup artifact, download and verify it, run it.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac
  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup
  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Quoted heredoc delimiter: the YAML below is written verbatim (no expansion).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
  awsEBSCSIDriver:
    enabled: true
    version: v1.8.0
  manageStorageClasses: true
containerRuntime: containerd
containerd:
  logLevel: info
  version: 1.6.6
docker:
  skipInstall: true
encryptionConfig: null
etcdClusters:
  events:
    cpuRequest: 100m
    memoryRequest: 100Mi
    version: 3.5.4
  main:
    cpuRequest: 200m
    memoryRequest: 100Mi
    version: 3.5.4
kubeAPIServer:
  allowPrivileged: true
  anonymousAuth: false
  apiAudiences:
  - kubernetes.svc.default
  apiServerCount: 3
  authorizationMode: Node,RBAC
  bindAddress: 0.0.0.0
  cloudProvider: aws
  enableAdmissionPlugins:
  - NamespaceLifecycle
  - LimitRanger
  - ServiceAccount
  - DefaultStorageClass
  - DefaultTolerationSeconds
  - MutatingAdmissionWebhook
  - ValidatingAdmissionWebhook
  - NodeRestriction
  - ResourceQuota
  etcdServers:
  - https://127.0.0.1:4001
  etcdServersOverrides:
  - /events#https://127.0.0.1:4002
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
  kubeletPreferredAddressTypes:
  - InternalIP
  - Hostname
  - ExternalIP
  logLevel: 2
  requestheaderAllowedNames:
  - aggregator
  requestheaderExtraHeaderPrefixes:
  - X-Remote-Extra-
  requestheaderGroupHeaders:
  - X-Remote-Group
  requestheaderUsernameHeaders:
  - X-Remote-User
  securePort: 443
  serviceAccountIssuer: https://api.internal.dev.datasaker.io
  serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
  serviceClusterIPRange: 100.64.0.0/13
  storageBackend: etcd3
kubeControllerManager:
  allocateNodeCIDRs: true
  attachDetachReconcileSyncPeriod: 1m0s
  cloudProvider: aws
  clusterCIDR: 100.96.0.0/11
  clusterName: dev.datasaker.io
  configureCloudRoutes: false
  enableLeaderMigration: true
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
  leaderElection:
    leaderElect: true
  logLevel: 2
  useServiceAccountCredentials: true
kubeProxy:
  clusterCIDR: 100.96.0.0/11
  cpuRequest: 100m
  image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
  logLevel: 2
kubeScheduler:
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
  leaderElection:
    leaderElect: true
  logLevel: 2
kubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
masterKubelet:
  anonymousAuth: false
  cgroupDriver: systemd
  cgroupRoot: /
  cloudProvider: aws
  clusterDNS: 100.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
  featureGates:
    CSIMigrationAWS: "true"
    InTreePluginAWSUnregister: "true"
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  logLevel: 2
  networkPluginName: cni
  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
  podManifestPath: /etc/kubernetes/manifests
  protectKernelDefaults: true
  registerSchedulable: false
  shutdownGracePeriod: 30s
  shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment consumed by nodeup (generated by kops).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2a
InstanceGroupRole: Master
NodeupConfigHash: bFvgCW9ijGRs5u8kNAX/s53tD3afsvYDdJVNW1Kq5OY=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
# Select the nodeup artifact matching this host's CPU architecture,
# download and verify it, then install and launch it.
function download-release() {
  local arch
  arch="$(uname -m)"
  case "${arch}" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac

  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup

  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
# Main: ensure a machine-id exists, prepare the install dir, write nodeup's
# config files, then fetch and run nodeup.
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Cluster spec consumed by nodeup (quoted heredoc: no shell expansion inside).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment for nodeup (quoted heredoc: no expansion).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2b
InstanceGroupRole: Master
NodeupConfigHash: 12BbVAVTnRcOLqha45NC0eii/lUhVtoQrIYpccKF/lQ=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,275 @@
#!/bin/bash
# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -o errexit
set -o nounset
set -o pipefail
# nodeup download candidates (comma-separated mirror list) and their
# SHA256 checksums, per CPU architecture.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=d8cbbd493e6f6133184a42c190e234c59fe9186b426191bef2f727e10bc66fba
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.24.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=62d4754900aa8d5b40a2541c22813e4f2ef9c4d06c09fa5a8cd38cf9cc35a3d9
export AWS_REGION=ap-northeast-2
# Raise kernel network buffer limits for large downloads/transfers.
# Best-effort (|| true): a failed sysctl must not trip errexit.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
# Choose and create the nodeup install directory, then cd into it.
# Sets the global INSTALL_DIR consumed later by download-release.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  # Quote expansions so the paths survive word splitting (shellcheck SC2086).
  mkdir -p "${INSTALL_DIR}/bin"
  mkdir -p "${INSTALL_DIR}/conf"
  cd "${INSTALL_DIR}"
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
  local -r file="$1"
  local -r hash="$2"
  # Intentionally unquoted: split-commas emits one URL per line and word
  # splitting builds the array.
  local -r urls=( $(split-commas "$3") )

  # Reuse an already-downloaded file only if its checksum still matches.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi

  while true; do
    for url in "${urls[@]}"; do
      # Try progressively less capable download tools.
      commands=(
        "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        # BUG FIX: was the literal "{url}" (missing $); now interpolates
        # the URL actually being attempted.
        echo "Attempting download with: ${cmd} ${url}"
        if ! (${cmd} "${url}"); then
          echo "== Download failed with ${cmd} =="
          continue
        fi
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Hash validation of ${url} failed. Retrying. =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} (SHA256 = ${hash}) =="
          return 0
        fi
      done
    done

    echo "All downloads failed; sleeping before retrying"
    sleep 60
  done
}
# Verify that a file's SHA256 digest matches the expected value.
# args: file, expected-hash. Returns non-zero (and prints) on mismatch.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual

  # Quote the filename so paths containing spaces hash correctly
  # (shellcheck SC2086); || true keeps errexit from aborting on a read error.
  actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}
# Split a comma-separated list into one item per line.
function split-commas() {
  tr ',' '\n' <<< "$1"
}
# Select the nodeup artifact matching this host's CPU architecture,
# download and verify it, then install and launch it.
function download-release() {
  local arch
  arch="$(uname -m)"
  case "${arch}" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac

  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
  chmod +x nodeup

  echo "Running nodeup"
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
# Main: ensure a machine-id exists, prepare the install dir, write nodeup's
# config files, then fetch and run nodeup.
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
# Cluster spec consumed by nodeup (quoted heredoc: no shell expansion inside).
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
# Per-instance-group environment for nodeup (quoted heredoc: no expansion).
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
InstanceGroupName: master-ap-northeast-2c
InstanceGroupRole: Master
NodeupConfigHash: 6HuG0yYyZf5DLo50saQaB9ApKbrna49ygtHGjkyb/l4=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,251 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2022-09-13T04:27:37Z"
name: dev.datasaker.io
spec:
api:
loadBalancer:
class: Classic
type: Public
authorization:
rbac: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
enabled: true
version: v1.8.0
manageStorageClasses: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
configStore: s3://clusters.dev.datasaker.io/dev.datasaker.io
containerRuntime: containerd
containerd:
logLevel: info
version: 1.6.6
dnsZone: Z072735718G25WNVKU834
docker:
skipInstall: true
etcdClusters:
- backups:
backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main
cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: main
version: 3.5.4
- backups:
backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events
cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-ap-northeast-2a
name: a
- encryptedVolume: true
instanceGroup: master-ap-northeast-2b
name: b
- encryptedVolume: true
instanceGroup: master-ap-northeast-2c
name: c
memoryRequest: 100Mi
name: events
version: 3.5.4
externalDns:
provider: dns-controller
iam:
allowContainerRegistry: true
legacy: false
keyStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: dev.datasaker.io
configureCloudRoutes: false
enableLeaderMigration: true
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.23.10@sha256:91c9d5d25c193cd1a2edd5082a3af479e85699bb46aaa58652d17b0f3b442c0f
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
image: registry.k8s.io/dns/k8s-dns-node-cache:1.21.3
memoryRequest: 5Mi
provider: CoreDNS
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.23.10@sha256:44bd124475325eda0906fef789f358d47665104cc6118fb5901b6cbb64ed201a
logLevel: 2
kubeScheduler:
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.23.10@sha256:07d72b53818163ad25b49693a0b9d35d5eb1d1aa2e6363f87fac8ab903164a0e
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: 1.23.10
masterInternalName: api.internal.dev.datasaker.io
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterPublicName: api.dev.datasaker.io
networkCIDR: 172.21.0.0/16
networkID: vpc-0b6e0b906c678a22f
networking:
calico:
encapsulationMode: ipip
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/secrets
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.21.8.0/23
id: subnet-0c875e254456809f7
name: ap-northeast-2a
type: Private
zone: ap-northeast-2a
- cidr: 172.21.10.0/23
id: subnet-05672a669943fc12f
name: ap-northeast-2b
type: Private
zone: ap-northeast-2b
- cidr: 172.21.12.0/23
id: subnet-0940fd78504acbbde
name: ap-northeast-2c
type: Private
zone: ap-northeast-2c
- cidr: 172.21.0.0/24
id: subnet-0de55619bee2411f8
name: utility-ap-northeast-2a
type: Utility
zone: ap-northeast-2a
- cidr: 172.21.1.0/24
id: subnet-0a5d787353f874684
name: utility-ap-northeast-2b
type: Utility
zone: ap-northeast-2b
- cidr: 172.21.2.0/24
id: subnet-0ee26ffc561efb292
name: utility-ap-northeast-2c
type: Utility
zone: ap-northeast-2c
topology:
dns:
type: Public
masters: private
nodes: private

View File

@@ -0,0 +1,792 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-attacher-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- csi.storage.k8s.io
resources:
- csinodeinfos
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-provisioner-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- create
- delete
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshots
verbs:
- get
- list
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- get
- list
- apiGroups:
- storage.k8s.io
resources:
- csinodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- watch
- list
- delete
- update
- create
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-resizer-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-external-snapshotter-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotclasses
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- create
- get
- list
- watch
- update
- delete
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-attacher-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-attacher-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-provisioner-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-provisioner-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-resizer-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-resizer-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-snapshotter-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-external-snapshotter-role
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-getter-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-csi-node-role
subjects:
- kind: ServiceAccount
name: ebs-csi-node-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node-sa
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: ebs-csi-node
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- node
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --v=2
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
name: ebs-plugin
ports:
- containerPort: 9808
name: healthz
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: kubelet-dir
- mountPath: /csi
name: plugin-dir
- mountPath: /dev
name: device-dir
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock
name: node-driver-registrar
volumeMounts:
- mountPath: /csi
name: plugin-dir
- mountPath: /registration
name: registration-dir
- args:
- --csi-address=/csi/csi.sock
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02
imagePullPolicy: IfNotPresent
name: liveness-probe
volumeMounts:
- mountPath: /csi
name: plugin-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
serviceAccountName: ebs-csi-node-sa
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet
type: Directory
name: kubelet-dir
- hostPath:
path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
type: DirectoryOrCreate
name: plugin-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
name: registration-dir
- hostPath:
path: /dev
type: Directory
name: device-dir
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
kops.k8s.io/managed-by: kops
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
containers:
- args:
- controller
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --k8s-tag-cluster-id=dev.datasaker.io
- --extra-tags=KubernetesCluster=dev.datasaker.io
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: key_id
name: aws-secret
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: access_key
name: aws-secret
optional: true
image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
name: ebs-plugin
ports:
- containerPort: 9808
name: healthz
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
- --feature-gates=Topology=true
- --extra-create-metadata
- --leader-election=true
- --default-fstype=ext4
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119
imagePullPolicy: IfNotPresent
name: csi-provisioner
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
- --leader-election=true
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b
imagePullPolicy: IfNotPresent
name: csi-attacher
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4
imagePullPolicy: IfNotPresent
name: csi-resizer
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --csi-address=/csi/csi.sock
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02
imagePullPolicy: IfNotPresent
name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccountName: ebs-csi-controller-sa
tolerations:
- operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/name: aws-ebs-csi-driver
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: socket-dir
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs.csi.aws.com
spec:
attachRequired: true
podInfoOnMount: false
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io
app.kubernetes.io/instance: aws-ebs-csi-driver
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-ebs-csi-driver
app.kubernetes.io/version: v1.8.0
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
name: ebs-csi-controller
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/instance: aws-ebs-csi-driver

View File

@@ -0,0 +1,69 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: 530752f323a7573cedaa993ac169181c2d36d70e1cb4950d3c1a3347ac586826
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 1060dbbcbf4f9768081b838e619da1fc3970ef2b86886f8e5c6ff3e2842c2aa3
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
version: 9.99.0
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
version: 9.99.0
- id: k8s-1.23
manifest: leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml
manifestHash: b9c91e09c0f28c9b74ff140b8395d611834c627d698846d625c10975a74a48c4
name: leader-migration.rbac.addons.k8s.io
selector:
k8s-addon: leader-migration.rbac.addons.k8s.io
version: 9.99.0
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 3e67c5934d55a5f5ebbd8a97e428aa6d9749812ba209a3dc1f1cb9449ee75c26
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 9.99.0
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.22
manifest: networking.projectcalico.org/k8s-1.22.yaml
manifestHash: 94e23c0a435bb93ebb2271d4352bd25a98b8d84064a40a1ff2077111cfe6dc44
name: networking.projectcalico.org
selector:
role.kubernetes.io/networking: "1"
version: 9.99.0
- id: k8s-1.17
manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml
manifestHash: 80c38e6bb751e5c9e58a013b9c09b70d0ca34383d15889e09df214090c52713c
name: aws-ebs-csi-driver.addons.k8s.io
selector:
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
version: 9.99.0

View File

@@ -0,0 +1,385 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.8.6@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- configMap:
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
maxUnavailable: 50%
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: coredns-autoscaler
kops.k8s.io/managed-by: kops
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists

View File

@@ -0,0 +1,140 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.24.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
kops.k8s.io/managed-by: kops
version: v1.24.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z072735718G25WNVKU834
- --internal-ipv4
- --zone=*/*
- -v=2
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/dns-controller:1.24.1@sha256:d0bff3dff30ec695702eb954b7568e3b5aa164f458a70be1d3f5194423ef90a6
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller

View File

@@ -0,0 +1,225 @@
apiVersion: v1
data:
config.yaml: |
{"cloud":"aws","configBase":"s3://clusters.dev.datasaker.io/dev.datasaker.io","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.dev.datasaker.io"],"Region":"ap-northeast-2"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.24.1
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.dev.datasaker.io
creationTimestamp: null
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
kops.k8s.io/managed-by: kops
version: v1.24.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
containers:
- args:
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/kops-controller:1.24.1@sha256:dec29a983e633e2d3321fef86e6fea211784b2dc9b62ce735d708e781ef4919c
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
runAsUser: 10011
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: kops-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller

View File

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api

View File

@@ -0,0 +1,52 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: leader-migration.rbac.addons.k8s.io
name: system::leader-locking-migration
namespace: kube-system
rules:
- apiGroups:
- coordination.k8s.io
resourceNames:
- cloud-provider-extraction-migration
resources:
- leases
verbs:
- create
- list
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: leader-migration.rbac.addons.k8s.io
name: system::leader-locking-migration
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system::leader-locking-migration
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:kube-controller-manager
- kind: ServiceAccount
name: kube-controller-manager
namespace: kube-system
- kind: ServiceAccount
name: aws-cloud-controller-manager
namespace: kube-system
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container

View File

@@ -0,0 +1,118 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-csi-1-21
parameters:
encrypted: "true"
type: gp3
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system

View File

@@ -0,0 +1,4 @@
{
"memberCount": 3,
"etcdVersion": "3.5.4"
}

View File

@@ -0,0 +1,4 @@
{
"memberCount": 3,
"etcdVersion": "3.5.4"
}

View File

@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 --peer-urls=https://__name__:2381
--quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events
--volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142
name: etcd-manager
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380
--quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220727@sha256:256a64fb44876d270f04ada1afd3ca431341f249aa52cbe2b3780f8f23961142
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.24.1@sha256:b969a40a66d7c9781b8f393c4bd1cc90828c45b0419e24bf2192be9a10fd6c44
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
securityContext:
runAsNonRoot: true
runAsUser: 10012
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: data
kops.k8s.io/instancegroup: dev-data-c
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: mgmt
kops.k8s.io/instancegroup: dev-mgmt-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: mgmt
kops.k8s.io/instancegroup: dev-mgmt-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-a
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-b
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,70 @@
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "7142721951268583043543051771"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
datasaker/group: process
kops.k8s.io/instancegroup: dev-process-c
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6

View File

@@ -0,0 +1,265 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS
UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5
NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec
E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf
DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU
fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp
vwIDAQAB
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
- be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64
- ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
- 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64
- 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX
DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/
wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG
yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G
VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5
db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0
xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt
Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA
Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4
wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr
anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd
wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob
PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O
ILB6F+I/dTp9EdI/qBNrqg==
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw
NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4
L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf
A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1
ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ
ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe
1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB
n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8
sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN
FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa
xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm
eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z
jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN
MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO
uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW
GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm
lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie
tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY
GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ
+QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C
vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp
fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh
xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX
cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB
jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS
XOkBupErXPmZkj/8CEk=
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy
MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO
4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F
5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV
mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn
l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l
/Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4
gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD
qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5
9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77
T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc
zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0
Z2RjK1J0AFawFQ==
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy
MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB
dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR
IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4
5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ
KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D
WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy
hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy
55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI
vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n
n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ
1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0
u1zeRMpGpRYUtA==
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5
MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod
F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B
Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA
P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I
Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z
2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi
L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut
ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ
EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5
ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n
ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp
1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb
Ge9OTWOZ
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "7142721951056419283723637893"
etcd-clients-ca: "7142721951028761584467841211"
etcd-manager-ca-events: "7142721951057921435241405328"
etcd-manager-ca-main: "7142721951074386633614158554"
etcd-peers-ca-events: "7142721951138880539659455124"
etcd-peers-ca-main: "7142721951056140991529806803"
kubernetes-ca: "7142721951268583043543051771"
service-account: "7142721951191621691964241737"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/instancegroup: master-ap-northeast-2a
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
etcdManifests:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml

View File

@@ -0,0 +1,265 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS
UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5
NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec
E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf
DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU
fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp
vwIDAQAB
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
- be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64
- ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
- 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64
- 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX
DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/
wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG
yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G
VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5
db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0
xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt
Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA
Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4
wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr
anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd
wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob
PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O
ILB6F+I/dTp9EdI/qBNrqg==
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw
NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4
L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf
A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1
ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ
ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe
1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB
n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8
sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN
FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa
xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm
eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z
jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN
MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO
uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW
GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm
lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie
tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY
GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ
+QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C
vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp
fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh
xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX
cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB
jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS
XOkBupErXPmZkj/8CEk=
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy
MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO
4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F
5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV
mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn
l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l
/Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4
gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD
qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5
9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77
T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc
zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0
Z2RjK1J0AFawFQ==
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy
MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB
dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR
IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4
5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ
KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D
WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy
hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy
55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI
vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n
n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ
1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0
u1zeRMpGpRYUtA==
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5
MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod
F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B
Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA
P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I
Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z
2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi
L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut
ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ
EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5
ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n
ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp
1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb
Ge9OTWOZ
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "7142721951056419283723637893"
etcd-clients-ca: "7142721951028761584467841211"
etcd-manager-ca-events: "7142721951057921435241405328"
etcd-manager-ca-main: "7142721951074386633614158554"
etcd-peers-ca-events: "7142721951138880539659455124"
etcd-peers-ca-main: "7142721951056140991529806803"
kubernetes-ca: "7142721951268583043543051771"
service-account: "7142721951191621691964241737"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/instancegroup: master-ap-northeast-2b
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
etcdManifests:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml

View File

@@ -0,0 +1,265 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 3
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.23.10@sha256:a3b6ba0b713cfba71e161e84cef0b2766b99c0afb0d96cd4f1e0f7d6ae0b0467
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.dev.datasaker.io
serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS
UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5
NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec
E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf
DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU
fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp
vwIDAQAB
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- c2ba75b36000103af6fa2c3955c5b8a633b33740e234931441082e21a334b80b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubelet
- 3ffa658e7f1595f622577b160bdcdc7a5a90d09d234757ffbe53dd50c0cb88f7@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/amd64/kubectl
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
- 6e8b24be90fffce6b025d254846da9d2ca6d65125f9139b6354bab0272253d01@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
- be3ba338e0ae31d6af1d4b1919ce3d47b90929d64f833cba1319126041c8ed48@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-amd64
- ec58ee1ee38d06cde56fd4442f119f0392f9b5fcbef19f400e963faedc94e486@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-amd64
arm64:
- 8ce1c79ee7c5d346719e3637e72a51dd96fc7f2e1f443aa39b05c1d9d9de32c8@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubelet
- d88b7777b3227dd49f44dbd1c7b918f9ddc5d016ecc47547a717a501fcdc316b@https://storage.googleapis.com/kubernetes-release/release/v1.23.10/bin/linux/arm64/kubectl
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb@https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-arm64.tar.gz
- 00c9ad161a77a01d9dcbd25b1d76fa9822e57d8e4abf26ba8907c98f6bcfcd0f@https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.arm64
- 1c18bed0002e39bf4f4f62fb541b81c43e2c2b666884c2f0293551d2e76959df@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.24.1/protokube-linux-arm64
- 7e8e043963fe510de37db2ecf7d1ec311e21ce58478c5fc7b54ae74a039a288b@https://artifacts.k8s.io/binaries/kops/1.24.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.24.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX
DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/
wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG
yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G
VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5
db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0
xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt
Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA
Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4
wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr
anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd
wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob
PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O
ILB6F+I/dTp9EdI/qBNrqg==
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw
NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4
L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf
A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1
ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ
ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe
1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB
n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8
sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN
FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa
xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm
eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z
jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN
MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO
uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW
GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm
lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie
tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY
GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ
+QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C
vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp
fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh
xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX
cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB
jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS
XOkBupErXPmZkj/8CEk=
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy
MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO
4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F
5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV
mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn
l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l
/Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4
gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD
qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5
9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77
T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc
zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0
Z2RjK1J0AFawFQ==
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy
MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB
dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR
IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4
5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ
KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D
WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB
AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy
hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy
55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI
vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n
n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ
1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0
u1zeRMpGpRYUtA==
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5
MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod
F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B
Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA
P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I
Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z
2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi
L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut
ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ
EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5
ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n
ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp
1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb
Ge9OTWOZ
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0
OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ
hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz
iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t
/eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt
UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z
h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B
Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap
afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz
VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS
qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA=
-----END CERTIFICATE-----
ClusterName: dev.datasaker.io
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "7142721951056419283723637893"
etcd-clients-ca: "7142721951028761584467841211"
etcd-manager-ca-events: "7142721951057921435241405328"
etcd-manager-ca-main: "7142721951074386633614158554"
etcd-peers-ca-events: "7142721951138880539659455124"
etcd-peers-ca-main: "7142721951056140991529806803"
kubernetes-ca: "7142721951268583043543051771"
service-account: "7142721951191621691964241737"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
CSIMigrationAWS: "true"
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/instancegroup: master-ap-northeast-2c
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: false
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.6.6
etcdManifests:
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main.yaml
- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml

File diff suppressed because it is too large Load Diff