diff --git a/kops/dev.datasaker.io-20221025.yaml b/kops/dev.datasaker.io-20221025.yaml new file mode 100644 index 0000000..77efe17 --- /dev/null +++ b/kops/dev.datasaker.io-20221025.yaml @@ -0,0 +1,516 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2022-09-13T04:27:37Z" + generation: 2 + name: dev.datasaker.io +spec: + api: + loadBalancer: + class: Classic + type: Public + authorization: + rbac: {} + channel: stable + cloudProvider: aws + configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io + containerRuntime: containerd + containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: events + iam: + allowContainerRegistry: true + legacy: false + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.25.2 + masterInternalName: api.internal.dev.datasaker.io + masterPublicName: api.dev.datasaker.io + networkCIDR: 172.21.0.0/16 + networkID: vpc-0b6e0b906c678a22f + networking: + calico: {} + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 172.21.8.0/23 + id: subnet-0c875e254456809f7 + name: ap-northeast-2a + type: Private + zone: ap-northeast-2a + - cidr: 172.21.10.0/23 + id: subnet-05672a669943fc12f + name: ap-northeast-2b + type: Private + zone: ap-northeast-2b + - cidr: 172.21.12.0/23 + id: subnet-0940fd78504acbbde + name: ap-northeast-2c + type: Private + zone: ap-northeast-2c + - cidr: 172.21.0.0/24 + id: subnet-0de55619bee2411f8 + name: utility-ap-northeast-2a + type: Utility + zone: ap-northeast-2a + - cidr: 172.21.1.0/24 + id: subnet-0a5d787353f874684 + name: utility-ap-northeast-2b + type: Utility + zone: ap-northeast-2b + - cidr: 172.21.2.0/24 + id: subnet-0ee26ffc561efb292 + name: utility-ap-northeast-2c + type: Utility + zone: ap-northeast-2c + topology: + dns: + type: Public + masters: private + nodes: private + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-10-19T07:38:40Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-druid-a +spec: + image: ami-0abb33b73a78cae31 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m6i.large + manager: CloudGroup + 
maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data-druid + kops.k8s.io/instancegroup: dev-data-druid-a + role: Node + subnets: + - ap-northeast-2a + taints: + - dev/data-druid:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-10-19T07:39:34Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-druid-b +spec: + image: ami-0abb33b73a78cae31 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data-druid + kops.k8s.io/instancegroup: dev-data-druid-b + role: Node + subnets: + - ap-northeast-2b + taints: + - dev/data-druid:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-10-19T07:40:36Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-druid-c +spec: + image: ami-0abb33b73a78cae31 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data-druid + kops.k8s.io/instancegroup: dev-data-druid-c + role: Node + subnets: + - ap-northeast-2c + taints: + - dev/data-druid:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-10-19T07:43:26Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-kafka-a +spec: + image: ami-0abb33b73a78cae31 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data-kafka + kops.k8s.io/instancegroup: dev-data-kafka-a + role: Node + subnets: + - ap-northeast-2a + taints: + - dev/data-kafka:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-10-19T07:44:44Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-kafka-b +spec: + image: ami-0abb33b73a78cae31 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data-kafka + kops.k8s.io/instancegroup: dev-data-kafka-b + role: Node + subnets: + - ap-northeast-2b + taints: + - dev/data-kafka:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-10-19T07:45:41Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-data-kafka-c +spec: + image: ami-0abb33b73a78cae31 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: data-kafka + kops.k8s.io/instancegroup: dev-data-kafka-c + role: Node + subnets: + - ap-northeast-2c + taints: + - dev/data-kafka:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-13T04:41:13Z" + generation: 7 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-mgmt-a +spec: + image: ami-0abb33b73a78cae31 + machineType: m6i.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-a + role: Node + subnets: + - ap-northeast-2a + taints: + - 
dev/mgmt:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-13T04:42:21Z" + generation: 5 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-mgmt-b +spec: + image: ami-0abb33b73a78cae31 + machineType: m6i.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-b + role: Node + subnets: + - ap-northeast-2b + taints: + - dev/mgmt:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-10-19T07:33:08Z" + generation: 4 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-mgmt-c +spec: + image: ami-0abb33b73a78cae31 + kubelet: + anonymousAuth: false + nodeLabels: + node-role.kubernetes.io/node: "" + machineType: m6i.xlarge + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-c + role: Node + subnets: + - ap-northeast-2c + taints: + - dev/mgmt:NoSchedule + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-13T04:37:18Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-process-a +spec: + image: ami-0abb33b73a78cae31 + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-a + role: Node + subnets: + - ap-northeast-2a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-13T04:38:00Z" + generation: 4 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-process-b +spec: + image: ami-0abb33b73a78cae31 + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-b + role: Node + subnets: + - ap-northeast-2b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-13T04:38:31Z" + generation: 4 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: dev-process-c +spec: + image: ami-0abb33b73a78cae31 + machineType: m6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-c + role: Node + subnets: + - ap-northeast-2c + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-13T04:27:37Z" + generation: 3 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: master-ap-northeast-2a +spec: + image: ami-0abb33b73a78cae31 + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: c6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2a + role: Master + rootVolumeSize: 50 + subnets: + - ap-northeast-2a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2022-09-13T04:27:37Z" + generation: 2 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: master-ap-northeast-2b +spec: + image: ami-0abb33b73a78cae31 + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: c6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2b + role: Master + rootVolumeSize: 50 + subnets: + - ap-northeast-2b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: 
"2022-09-13T04:27:37Z" + generation: 2 + labels: + kops.k8s.io/cluster: dev.datasaker.io + name: master-ap-northeast-2c +spec: + image: ami-0abb33b73a78cae31 + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: c6i.large + manager: CloudGroup + maxSize: 1 + minSize: 1 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2c + role: Master + rootVolumeSize: 50 + subnets: + - ap-northeast-2c diff --git a/terraform/tf-kops-dev-20221025/data/aws_iam_role_masters.dev.datasaker.io_policy b/terraform/tf-kops-dev-20221025/data/aws_iam_role_masters.dev.datasaker.io_policy new file mode 100644 index 0000000..9f31f33 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_iam_role_masters.dev.datasaker.io_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_iam_role_nodes.dev.datasaker.io_policy b/terraform/tf-kops-dev-20221025/data/aws_iam_role_nodes.dev.datasaker.io_policy new file mode 100644 index 0000000..9f31f33 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_iam_role_nodes.dev.datasaker.io_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_iam_role_policy_masters.dev.datasaker.io_policy b/terraform/tf-kops-dev-20221025/data/aws_iam_role_policy_masters.dev.datasaker.io_policy new file mode 100644 index 0000000..86daf59 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_iam_role_policy_masters.dev.datasaker.io_policy @@ -0,0 +1,259 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::hostedzone/Z072735718G25WNVKU834" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io", + "ec2:CreateAction": [ + 
"CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:snapshot/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:DescribeRepositories", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:ListImages", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + 
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "dev.datasaker.io" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy b/terraform/tf-kops-dev-20221025/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy new file mode 100644 index 0000000..aa71a3a --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy @@ -0,0 +1,50 @@ +{ + "Statement": [ + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/addons/*", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/cluster-completed.spec", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/igconfig/node/*", + "arn:aws:s3:::clusters.dev.datasaker.io/dev.datasaker.io/secrets/dockerconfig" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::clusters.dev.datasaker.io" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:ModifyNetworkInterfaceAttribute", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:DescribeRepositories", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:ListImages", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key b/terraform/tf-kops-dev-20221025/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key new file mode 100644 index 0000000..c99269a --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCyfTPnCyr0Typ7yGTcy0LEGa8IH8yESEXa4Qyr85dWrxazTnWO7iYS0Ze6L0GMMO5qZXg/ntJGhI4PYF/WbCZ5KZMRXePyQIVs5pKMvSX4yH2gPIET5c6yTg4ZSIqrZDLBXGEZxMVp/SnNx1tRzxi0plBDtguSy6LZD0C1ue+VeT4oO98EB2T01GOeQp+RlF/theZuEWSWOVfFD0qVdsHIwVlYYlEZR11IrTamabMOVzyw+/8cokA4hgsrrkSrpKQ2YW0evHK1pxZrw+i3YJuHh3hJ0h98Ymw3rpHGec59gXaYT0PQEQvZs9RCrYw8NpCTQrImXR1UVjeeY3KGgpYQXna+WAmkjA+K/JvLmHGeombVJyd3v8330FX+Ob9klgqTWFvwb8Ew4QCcfl5hDAWxvzoJKAoG/TAZd13aNYaZAVkeWB7vPFWZ0brea6sqUJzXqzPwUXa0OirnqEfxMLZoo4tFyfxuVYVK+ScxayBPYJQkhwmTAZ4bj0OfQEw/jJM= hsgahm@ws-ubuntu \ No newline at end of file diff --git 
a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..959fc3b --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-a.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-druid-a 
+InstanceGroupRole: Node +NodeupConfigHash: i8ZU3JYn4ky+JMWbd5o6SGd8pQX2T2Js/GJVM4ELTMY= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..41502b0 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-b.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-druid-b 
+InstanceGroupRole: Node +NodeupConfigHash: zKG8laSZLKUofsgCmpTkjq/wM804eIkMHGX+BShf8Xk= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..eff3b4e --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-druid-c.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-druid-c 
+InstanceGroupRole: Node +NodeupConfigHash: YhiWkxZTwUsYQwl0HRo682ku6OjJiay8SNHD4Ai2RDo= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..0b7ea7d --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-a.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-kafka-a 
+InstanceGroupRole: Node +NodeupConfigHash: DKAdNBi89Hg/K4/++jF/gCUMxV0IzUKiyjnFAvhnWHg= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..7d589ef --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-b.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-kafka-b 
+InstanceGroupRole: Node +NodeupConfigHash: G3nQuQTQuU0v5JRMfVZywJVUZxZGBIELTfwxmUeEKlc= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..0303140 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-data-kafka-c.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-data-kafka-c 
+InstanceGroupRole: Node +NodeupConfigHash: zcqf9y8BDn/MuYepBtfh/+13fWsk+LzhH98DXYXKSaQ= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..fe0bdb5 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-mgmt-a 
+InstanceGroupRole: Node +NodeupConfigHash: /7AvbMZe7bqnvkFDqaaD58JLNpSdFz2YBV7bicn9SBU= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..44e0693 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-mgmt-b 
+InstanceGroupRole: Node +NodeupConfigHash: 1bim3y49H61m6xqaMNmZ7FJaqPQ7bhobuaKlBbsK4Io= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..70405f6 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-mgmt-c.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-mgmt-c 
+InstanceGroupRole: Node +NodeupConfigHash: ARAislI0qz2jhFUF8yatW1ITTWXRkY9Sxc25BBt77Vw= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data new file mode 100644 index 0000000..e657dd9 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-a 
+InstanceGroupRole: Node +NodeupConfigHash: xmkzEMYRSeEptG16cJUXqjf+kA09Jt09sDchuhwQxyA= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data new file mode 100644 index 0000000..b88d8ea --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-b 
+InstanceGroupRole: Node +NodeupConfigHash: ILasOIasBRTzHAYQV7G3YTw6QKD/vKU+7qgAPRWT/fs= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data new file mode 100644 index 0000000..4e7b808 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data @@ -0,0 +1,192 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: dev-process-c 
+InstanceGroupRole: Node +NodeupConfigHash: KPtyTPA//J5si06n+qNL/6sozOWSO372LH6NtCxyMPA= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..13635a0 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data @@ -0,0 +1,290 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: external + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.25.2@sha256:86e7b79379dddf58d7b7189d02ca96cc7e07d18efa4eb42adcaa4cf94531b96e + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - 
X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: external + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-controller-manager:v1.25.2@sha256:f961aee35fd2e9a5ee057365e56c5bf40a39bfef91f785f312e51891db41876b + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.25.2@sha256:ef2e24a920a7432aff5b435562301dde3beb528b0c7bbec58ddf0a9af64d5fce + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2a +InstanceGroupRole: Master +NodeupConfigHash: qan0mo1KY5Q5p8aQ+/k3sCUBKhwzY63cGLMWRd4Mcnc= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..ff92260 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data @@ -0,0 
+1,290 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: external + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.25.2@sha256:86e7b79379dddf58d7b7189d02ca96cc7e07d18efa4eb42adcaa4cf94531b96e + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - 
X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: external + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-controller-manager:v1.25.2@sha256:f961aee35fd2e9a5ee057365e56c5bf40a39bfef91f785f312e51891db41876b + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 +kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.25.2@sha256:ef2e24a920a7432aff5b435562301dde3beb528b0c7bbec58ddf0a9af64d5fce + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io +InstanceGroupName: master-ap-northeast-2b +InstanceGroupRole: Master +NodeupConfigHash: VE3SMVuK4n6CDYK1kIill+w/93F5DHPTsu1BDc0qBFg= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data b/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data new file mode 100644 index 0000000..c7a2055 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data @@ -0,0 
+1,290 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-amd64 +NODEUP_HASH_AMD64=3f080d73908f1263c9754f114042f8a934c2239c17bc73b04a2f84c64ec4f68f +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.25.0/nodeup-linux-arm64 +NODEUP_HASH_ARM64=4a906834670bd86b5a3256aa1bba81c2d3aee5ff440e723a048fa832b336487c + +export AWS_REGION=ap-northeast-2 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true +containerRuntime: containerd +containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + cpuRequest: 100m + memoryRequest: 100Mi + version: 3.5.4 + main: + cpuRequest: 200m + memoryRequest: 100Mi + version: 3.5.4 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: external + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.25.2@sha256:86e7b79379dddf58d7b7189d02ca96cc7e07d18efa4eb42adcaa4cf94531b96e + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - 
+  requestheaderGroupHeaders:
+  - X-Remote-Group
+  requestheaderUsernameHeaders:
+  - X-Remote-User
+  securePort: 443
+  serviceAccountIssuer: https://api.internal.dev.datasaker.io
+  serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
+  serviceClusterIPRange: 100.64.0.0/13
+  storageBackend: etcd3
+kubeControllerManager:
+  allocateNodeCIDRs: true
+  attachDetachReconcileSyncPeriod: 1m0s
+  cloudProvider: external
+  clusterCIDR: 100.96.0.0/11
+  clusterName: dev.datasaker.io
+  configureCloudRoutes: false
+  featureGates:
+    CSIMigrationAWS: "true"
+    InTreePluginAWSUnregister: "true"
+  image: registry.k8s.io/kube-controller-manager:v1.25.2@sha256:f961aee35fd2e9a5ee057365e56c5bf40a39bfef91f785f312e51891db41876b
+  leaderElection:
+    leaderElect: true
+  logLevel: 2
+  useServiceAccountCredentials: true
+kubeProxy:
+  clusterCIDR: 100.96.0.0/11
+  cpuRequest: 100m
+  image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77
+  logLevel: 2
+kubeScheduler:
+  featureGates:
+    CSIMigrationAWS: "true"
+    InTreePluginAWSUnregister: "true"
+  image: registry.k8s.io/kube-scheduler:v1.25.2@sha256:ef2e24a920a7432aff5b435562301dde3beb528b0c7bbec58ddf0a9af64d5fce
+  leaderElection:
+    leaderElect: true
+  logLevel: 2
+kubelet:
+  anonymousAuth: false
+  cgroupDriver: systemd
+  cgroupRoot: /
+  cloudProvider: external
+  clusterDNS: 100.64.0.10
+  clusterDomain: cluster.local
+  enableDebuggingHandlers: true
+  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
+  featureGates:
+    CSIMigrationAWS: "true"
+    InTreePluginAWSUnregister: "true"
+  kubeconfigPath: /var/lib/kubelet/kubeconfig
+  logLevel: 2
+  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
+  podManifestPath: /etc/kubernetes/manifests
+  protectKernelDefaults: true
+  shutdownGracePeriod: 30s
+  shutdownGracePeriodCriticalPods: 10s
+masterKubelet:
+  anonymousAuth: false
+  cgroupDriver: systemd
+  cgroupRoot: /
+  cloudProvider: external
+  clusterDNS: 100.64.0.10
+  clusterDomain: cluster.local
+  enableDebuggingHandlers: true
+  evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
+  featureGates:
+    CSIMigrationAWS: "true"
+    InTreePluginAWSUnregister: "true"
+  kubeconfigPath: /var/lib/kubelet/kubeconfig
+  logLevel: 2
+  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
+  podManifestPath: /etc/kubernetes/manifests
+  protectKernelDefaults: true
+  registerSchedulable: false
+  shutdownGracePeriod: 30s
+  shutdownGracePeriodCriticalPods: 10s
+
+__EOF_CLUSTER_SPEC
+
+cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
+CloudProvider: aws
+ConfigBase: s3://clusters.dev.datasaker.io/dev.datasaker.io
+InstanceGroupName: master-ap-northeast-2c
+InstanceGroupRole: Master
+NodeupConfigHash: jBEDU8SvgVWo5cG3BGBSUNYwYZZoorJwlehYum+BqPA=
+
+__EOF_KUBE_ENV
+
+download-release
+echo "== nodeup node config done =="
diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_cluster-completed.spec_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_cluster-completed.spec_content
new file mode 100644
index 0000000..6ba7f4a
--- /dev/null
+++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_cluster-completed.spec_content
@@ -0,0 +1,275 @@
+apiVersion: kops.k8s.io/v1alpha2
+kind: Cluster
+metadata:
+  creationTimestamp:
"2022-09-13T04:27:37Z" + generation: 2 + name: dev.datasaker.io +spec: + api: + loadBalancer: + class: Classic + type: Public + authorization: + rbac: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: true + version: v1.8.0 + manageStorageClasses: true + cloudControllerManager: + allocateNodeCIDRs: true + clusterCIDR: 100.64.0.0/10 + clusterName: dev.datasaker.io + configureCloudRoutes: false + image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.25.0 + leaderElection: + leaderElect: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: s3://clusters.dev.datasaker.io/dev.datasaker.io + configStore: s3://clusters.dev.datasaker.io/dev.datasaker.io + containerRuntime: containerd + containerd: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 + dnsZone: Z072735718G25WNVKU834 + docker: + skipInstall: true + etcdClusters: + - backups: + backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: main + version: 3.5.4 + - backups: + backupStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: master-ap-northeast-2a + name: a + - encryptedVolume: true + instanceGroup: master-ap-northeast-2b + name: b + - encryptedVolume: true + instanceGroup: master-ap-northeast-2c + name: c + memoryRequest: 100Mi + name: events + version: 3.5.4 + externalDns: + provider: dns-controller + iam: + allowContainerRegistry: true + legacy: false + keyStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: external + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.25.2@sha256:86e7b79379dddf58d7b7189d02ca96cc7e07d18efa4eb42adcaa4cf94531b96e + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + 
- X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: external + clusterCIDR: 100.96.0.0/11 + clusterName: dev.datasaker.io + configureCloudRoutes: false + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-controller-manager:v1.25.2@sha256:f961aee35fd2e9a5ee057365e56c5bf40a39bfef91f785f312e51891db41876b + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.25.2@sha256:ddde7d23d168496d321ef9175a8bf964a54a982b026fb207c306d853cbbd4f77 + logLevel: 2 + kubeScheduler: + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-scheduler:v1.25.2@sha256:ef2e24a920a7432aff5b435562301dde3beb528b0c7bbec58ddf0a9af64d5fce + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.25.2 + masterInternalName: api.internal.dev.datasaker.io + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.dev.datasaker.io + networkCIDR: 172.21.0.0/16 + networkID: vpc-0b6e0b906c678a22f + networking: + calico: + encapsulationMode: ipip + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: s3://clusters.dev.datasaker.io/dev.datasaker.io/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 
172.21.8.0/23 + id: subnet-0c875e254456809f7 + name: ap-northeast-2a + type: Private + zone: ap-northeast-2a + - cidr: 172.21.10.0/23 + id: subnet-05672a669943fc12f + name: ap-northeast-2b + type: Private + zone: ap-northeast-2b + - cidr: 172.21.12.0/23 + id: subnet-0940fd78504acbbde + name: ap-northeast-2c + type: Private + zone: ap-northeast-2c + - cidr: 172.21.0.0/24 + id: subnet-0de55619bee2411f8 + name: utility-ap-northeast-2a + type: Utility + zone: ap-northeast-2a + - cidr: 172.21.1.0/24 + id: subnet-0a5d787353f874684 + name: utility-ap-northeast-2b + type: Utility + zone: ap-northeast-2b + - cidr: 172.21.2.0/24 + id: subnet-0ee26ffc561efb292 + name: utility-ap-northeast-2c + type: Utility + zone: ap-northeast-2c + topology: + dns: + type: Public + masters: private + nodes: private diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content new file mode 100644 index 0000000..5e1fa8b --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content @@ -0,0 +1,237 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + k8s-app: aws-cloud-controller-manager + name: aws-cloud-controller-manager + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: aws-cloud-controller-manager + template: + metadata: + creationTimestamp: null + labels: + k8s-app: aws-cloud-controller-manager + kops.k8s.io/managed-by: kops + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --allocate-node-cidrs=true + - --cluster-cidr=100.64.0.0/10 + - --cluster-name=dev.datasaker.io + - --configure-cloud-routes=false + - --leader-elect=true + - --v=2 + - --cloud-provider=aws + - --use-service-account-credentials=true + - --cloud-config=/etc/kubernetes/cloud.config + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.25.0@sha256:d12285173cb301d08ce1a56256782478ec2ec334f3af345b9f753b8de2598aad + imagePullPolicy: IfNotPresent + name: aws-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/kubernetes/cloud.config + name: cloudconfig + readOnly: true + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccountName: aws-cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node.kubernetes.io/not-ready + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - hostPath: + path: /etc/kubernetes/cloud.config + type: "" + name: cloudconfig + updateStrategy: + type: RollingUpdate + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + 
app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: aws-cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: cloud-controller-manager:apiserver-authentication-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- apiGroup: "" + kind: ServiceAccount + name: aws-cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: system:cloud-controller-manager +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - '*' +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +- apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - node-controller + - service-controller + - route-controller + resources: + - serviceaccounts/token + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: +- apiGroup: "" + kind: ServiceAccount + name: aws-cloud-controller-manager + namespace: kube-system \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content new file mode 100644 index 0000000..fa7d63b --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content @@ -0,0 +1,785 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-attacher-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - csi.storage.k8s.io + resources: + - csinodeinfos + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-provisioner-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - delete +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-resizer-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + 
addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-snapshotter-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-attacher-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-attacher-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-provisioner-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-provisioner-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-resizer-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-resizer-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-snapshotter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-snapshotter-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: 
aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-getter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-csi-node-role +subjects: +- kind: ServiceAccount + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-role +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-node + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - node + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=2 + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: kubelet-dir + - mountPath: /csi + name: plugin-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 + imagePullPolicy: IfNotPresent + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --csi-address=/csi/csi.sock + image: 
registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02 + imagePullPolicy: IfNotPresent + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: ebs-csi-node-sa + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet + type: Directory + name: kubelet-dir + - hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /dev + type: Directory + name: device-dir + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + kops.k8s.io/managed-by: kops + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - controller + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --k8s-tag-cluster-id=dev.datasaker.io + - --extra-tags=KubernetesCluster=dev.datasaker.io + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: key_id + name: aws-secret + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: access_key + name: aws-secret + optional: true + image: registry.k8s.io/provider-aws/aws-ebs-csi-driver:v1.8.0@sha256:2727c4ba96b420f6280107daaf4a40a5de5f7241a1b70052056a5016dff05b2f + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + - --feature-gates=Topology=true + - --extra-create-metadata + - --leader-election=true + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: 
registry.k8s.io/sig-storage/csi-provisioner:v3.1.0@sha256:122bfb8c1edabb3c0edd63f06523e6940d958d19b3957dc7b1d6f81e9f1f6119 + imagePullPolicy: IfNotPresent + name: csi-provisioner + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0@sha256:8b9c313c05f54fb04f8d430896f5f5904b6cb157df261501b29adc04d2b2dc7b + imagePullPolicy: IfNotPresent + name: csi-attacher + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0@sha256:9ebbf9f023e7b41ccee3d52afe39a89e3ddacdbb69269d583abfc25847cfd9e4 + imagePullPolicy: IfNotPresent + name: csi-resizer + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0@sha256:44d8275b3f145bc290fd57cb00de2d713b5e72d2e827d8c5555f8ddb40bf3f02 + imagePullPolicy: IfNotPresent + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccountName: ebs-csi-controller-sa + tolerations: + - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + volumes: + - emptyDir: {} + name: socket-dir + +--- + +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs.csi.aws.com +spec: + attachRequired: true + podInfoOnMount: false + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.8.0 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content new file mode 100644 index 0000000..bb74fd2 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content @@ -0,0 +1,118 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + 
addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: c42ef9711dcd1f31f50a3ee10190ab78be7fd8e896a5c0eef4974ead759db649 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: e74407f12bb4589901fc6e1785395747588ba2e8788ffbca664d88c4c8da58e5 + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - id: k8s-1.23 + manifest: leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml + manifestHash: b9c91e09c0f28c9b74ff140b8395d611834c627d698846d625c10975a74a48c4 + name: leader-migration.rbac.addons.k8s.io + selector: + k8s-addon: leader-migration.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 17a7d2855452f5f93b465ea1bcaaa20ad8e94e6e4e544ae17eae7808454fc78d + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 + - id: k8s-1.25 + manifest: networking.projectcalico.org/k8s-1.25.yaml + manifestHash: a3d712db23f74a4313682e8ff77b2bd1fa58568526507291f8cbfa694efe6995 + name: networking.projectcalico.org + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - kind: Service + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: 
addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=networking.projectcalico.org,app.kubernetes.io/managed-by=kops + selector: + role.kubernetes.io/networking: "1" + version: 9.99.0 + - id: k8s-1.18 + manifest: aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml + manifestHash: 7f65474ed049a1a20b3360ff214f97ebeb660160a06f329cc0cff62f4c7d0fdf + name: aws-cloud-controller.addons.k8s.io + selector: + k8s-addon: aws-cloud-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.17 + manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml + manifestHash: 8b9b712bbb505db50b8c92325440ea039f26f18b6e82c171827ecafaba2f2c2f + name: aws-ebs-csi-driver.addons.k8s.io + selector: + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + version: 9.99.0 diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000..96c110d --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.9.3@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: 
null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5@sha256:aa60f453d64dfb3c3fd9e7306f988c36c6352c4f2d956aa90467f2808091effa + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000..6ee2fbb --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.25.0 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + 
kops.k8s.io/managed-by: kops + version: v1.25.0 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z072735718G25WNVKU834 + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.25.0@sha256:635bf83f6dc4f6d7ee42b90c005c5ec61d9fbdd56c25eff3150e4ce5e0c77699 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000..afe8e60 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"s3://clusters.dev.datasaker.io/dev.datasaker.io","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.dev.datasaker.io"],"Region":"ap-northeast-2"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"],"useInstanceIDForNodeName":true}} +kind: 
ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.25.0 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.dev.datasaker.io + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.25.0 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.25.0@sha256:ffca50fd02426835e9b3c51d950e0726774de27934d944457a0391d8ba64a5e6 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000..0cde75e --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content new file mode 100644 index 0000000..86d68c7 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content @@ -0,0 +1,52 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: leader-migration.rbac.addons.k8s.io + name: system::leader-locking-migration + namespace: kube-system +rules: +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-provider-extraction-migration + resources: + - leases + verbs: + - create + - list + - get + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: leader-migration.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: 
leader-migration.rbac.addons.k8s.io + name: system::leader-locking-migration + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: system::leader-locking-migration +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:kube-controller-manager +- kind: ServiceAccount + name: kube-controller-manager + namespace: kube-system +- kind: ServiceAccount + name: aws-cloud-controller-manager + namespace: kube-system +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000..502c682 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.25_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.25_content new file mode 100644 index 0000000..af7f66f --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.25_content @@ -0,0 +1,4844 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node + namespace: kube-system + +--- + +apiVersion: v1 +data: + calico_backend: bird + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "assign_ipv4": "true", + "assign_ipv6": "false", + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + typha_service_name: none + veth_mtu: "0" +kind: ConfigMap +metadata: + 
creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-config + namespace: kube-system + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: 'ASNumber is the default AS number used by a node. [Default: + 64512]' + format: int32 + type: integer + bindMode: + description: BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + listenPort: + description: ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]' + type: string + nodeMeshMaxRestartTime: + description: Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: Optional BGP password for full node-to-mesh peerings. 
+ This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: 'NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. + type: string + communities: + description: Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. 
+ properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;" (default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: Time to allow for software restart. When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necessary. + format: int32 + type: integer + password: + description: Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[<IPv6>]:port` + or `<IPv4>:<port>` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + sourceAddress: + description: Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. 
+ type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. + items: + type: string + type: array + node: + description: The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. 
+ type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. 
+ type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: Type of the source where a route is learned + from. + type: string + type: object + type: + description: Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. 
+ type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: 'AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]' + type: boolean + allowVXLANPacketsFromWorkloads: + description: 'AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]' + type: boolean + awsSrcDstCheck: + description: 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. [Default: + true]' + type: boolean + bpfDataIfacePattern: + description: BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico''s BPF maps and cannot insert their own + BPF programs to interfere with Calico''s. 
[Default: true]' + type: boolean + bpfEnabled: + description: 'BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]' + type: boolean + bpfEnforceRPF: + description: 'BPFEnforceRPF enforce strict RPF on all interfaces with + BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled or Strict. [Default: Strict]' + type: string + bpfExtToServiceConnmark: + description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing interpreted by RPF + check. [Default: 0]' + type: integer + bpfExternalServiceMode: + description: 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfKubeProxyEndpointSlicesEnabled: + description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]' + type: boolean + bpfKubeProxyMinSyncPeriod: + description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix''s embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]' + type: string + bpfLogLevel: + description: 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: 'BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption.' + type: integer + bpfMapSizeIPSets: + description: BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeIfState: + description: BPFMapSizeIfState sets the size for ifstate map. The + ifstate map must be large enough to hold an entry for each device + (host + workloads) on a host. + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. 
+ type: integer + bpfMapSizeNATFrontend: + description: BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: 'BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]' + pattern: ^.* + x-kubernetes-int-or-string: true + bpfPolicyDebugEnabled: + description: BPFPolicyDebugEnabled when true, Felix records detailed + information about the BPF policy programs, which can be examined + with the calico-bpf command-line tool. + type: boolean + chainInsertMode: + description: 'ChainInsertMode controls whether Felix hooks the kernel''s + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico''s rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]' + type: string + dataplaneDriver: + description: DataplaneDriver filename of the external dataplane driver + to use. Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: 'DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix''s (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s]' + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. 
+ [Default: Drop]' + type: string + deviceRouteProtocol: + description: This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: FeatureDetectOverride is used to override the feature + detection. Values are specified in a comma separated list with no + spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". + "true" or "false" will force the feature, empty or omitted values + are auto-detected. + type: string + floatingIPs: + description: FloatingIPs configures whether or not Felix will program + floating IP addresses. 
+ enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: 'GenericXDPEnabled enables Generic XDP so network cards + that don''t support XDP offload or driver modes can use XDP. This + is not recommended since it doesn''t provide better performance + than iptables. [Default: false]' + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + interfaceExclude: + description: 'InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes'' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with ''/''. For example having values ''/^kube/,veth1'' + will exclude all interfaces that begin with ''kube'' and also the + interface ''veth1''. [Default: kube-ipvs0]' + type: string + interfacePrefix: + description: 'InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the ''cali'' value, and our OpenStack integration + sets the ''tap'' value. [Default: cali]' + type: string + interfaceRefreshInterval: + description: InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + description: 'IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]' + type: boolean + ipipMTU: + description: 'IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]' + type: integer + ipsetsRefreshInterval: + description: 'IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: + 90s]' + type: string + iptablesBackend: + description: IptablesBackend specifies which backend of iptables will + be used. The default is legacy. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: 'IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix''s + container at a different path). [Default: /run/xtables.lock]' + type: string + iptablesLockProbeInterval: + description: 'IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]' + type: string + iptablesLockTimeout: + description: 'IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. 
[Default: 0s disabled]' + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: 'IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]' + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: 'IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn''t + respect the iptables lock. [Default: 1s]' + type: string + iptablesRefreshInterval: + description: 'IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico''s rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to a higher value to reduce Felix CPU + usage. [Default: 10s]' + type: string + ipv6Support: + description: IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: 'KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767].' + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: 'LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]' + type: string + logPrefix: + description: 'LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]' + type: string + logSeverityFile: + description: 'LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + logSeveritySys: + description: 'LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]' + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: 'MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]' + type: string + metadataPort: + description: 'MetadataPort is the port of the metadata server. 
This, + combined with global.MetadataAddr (if not ''None''), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775].' + type: integer + mtuIfacePattern: + description: MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: 'OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]' + type: string + policySyncPathPrefix: + description: 'PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]' + type: string + prometheusGoMetricsEnabled: + description: 'PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusMetricsEnabled: + description: 'PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]' + type: boolean + prometheusMetricsHost: + description: 'PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]' + type: integer + prometheusProcessMetricsEnabled: + description: 'PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]' + type: boolean + prometheusWireGuardMetricsEnabled: + description: 'PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]' + type: boolean + removeExternalRoutes: + description: Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. 
+ type: boolean + reportingInterval: + description: 'ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]' + type: string + reportingTTL: + description: 'ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]' + type: string + routeRefreshInterval: + description: 'RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico''s rules. Set to 0 to disable route refresh. + [Default: 90s]' + type: string + routeSource: + description: 'RouteSource configures where Felix gets its routing + information. - WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + type: string + routeSyncDisabled: + description: RouteSyncDisabled will disable all operations performed + on the route table. Set to true to run in network-policy mode only. + type: boolean + routeTableRange: + description: Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: 'SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]' + type: boolean + usageReportingEnabled: + description: 'UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]' + type: boolean + usageReportingInitialDelay: + description: 'UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]' + type: string + usageReportingInterval: + description: 'UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]' + type: string + useInternalDataplaneDriver: + description: UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: 'VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for VXLAN networking. Optional as Felix determines + this based on the existing IP pools. [Default: nil (unset)]' + type: boolean + vxlanMTU: + description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. 
See Configuring MTU [Default: 1410]' + type: integer + vxlanMTUV6: + description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]' + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: 'WireguardEnabled controls whether Wireguard is enabled + for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). + [Default: false]' + type: boolean + wireguardEnabledV6: + description: 'WireguardEnabledV6 controls whether Wireguard is enabled + for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). + [Default: false]' + type: boolean + wireguardHostEncryptionEnabled: + description: 'WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]' + type: boolean + wireguardInterfaceName: + description: 'WireguardInterfaceName specifies the name to use for + the IPv4 Wireguard interface. [Default: wireguard.cali]' + type: string + wireguardInterfaceNameV6: + description: 'WireguardInterfaceNameV6 specifies the name to use for + the IPv6 Wireguard interface. [Default: wg-v6.cali]' + type: string + wireguardKeepAlive: + description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]' + type: string + wireguardListeningPort: + description: 'WireguardListeningPort controls the listening port used + by IPv4 Wireguard. [Default: 51820]' + type: integer + wireguardListeningPortV6: + description: 'WireguardListeningPortV6 controls the listening port + used by IPv6 Wireguard. [Default: 51821]' + type: integer + wireguardMTU: + description: 'WireguardMTU controls the MTU on the IPv4 Wireguard + interface. See Configuring MTU [Default: 1440]' + type: integer + wireguardMTUV6: + description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard + interface. See Configuring MTU [Default: 1420]' + type: integer + wireguardRoutingRulePriority: + description: 'WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]' + type: integer + workloadSourceSpoofing: + description: WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string + xdpEnabled: + description: 'XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]' + type: boolean + xdpRefreshInterval: + description: 'XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico''s BPF maps or attached programs. Set to 0 to disable XDP + refresh. 
[Default: 90s]' + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. 
\n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. 
If specified,
+                          only traffic that originates from or terminates at endpoints
+                          within the selected service(s) will be matched, and only
+                          to/from each endpoint's port. \n Services cannot be specified
+                          on the same rule as Selector, NotSelector, NamespaceSelector,
+                          Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+                          can only be specified with Services on ingress rules."
+                        properties:
+                          name:
+                            description: Name specifies the name of a Kubernetes
+                              Service to match.
+                            type: string
+                          namespace:
+                            description: Namespace specifies the namespace of the
+                              given Service. If left empty, the rule will match
+                              within this policy's namespace.
+                            type: string
+                        type: object
+                    type: object
+                  http:
+                    description: HTTP contains match criteria that apply to HTTP
+                      requests.
+                    properties:
+                      methods:
+                        description: Methods is an optional field that restricts
+                          the rule to apply only to HTTP requests that use one of
+                          the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+                          methods are OR'd together.
+                        items:
+                          type: string
+                        type: array
+                      paths:
+                        description: 'Paths is an optional field that restricts
+                          the rule to apply to HTTP requests that use one of the
+                          listed HTTP Paths. Multiple paths are OR''d together.
+                          e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+                          ONLY specify either a `exact` or a `prefix` match. The
+                          validator will check for it.'
+                        items:
+                          description: 'HTTPPath specifies an HTTP path to match.
+                            It may be either of the form: exact: <path>: which matches
+                            the path exactly or prefix: <path-prefix>: which matches
+                            the path prefix'
+                          properties:
+                            exact:
+                              type: string
+                            prefix:
+                              type: string
+                          type: object
+                        type: array
+                    type: object
+                  icmp:
+                    description: ICMP is an optional field that restricts the rule
+                      to apply to a specific type and code of ICMP traffic. This
+                      should only be specified if the Protocol field is set to "ICMP"
+                      or "ICMPv6".
+                    properties:
+                      code:
+                        description: Match on a specific ICMP code. If specified,
+                          the Type value must also be specified. This is a technical
+                          limitation imposed by the kernel's iptables firewall,
+                          which Calico uses to enforce the rule.
+                        type: integer
+                      type:
+                        description: Match on a specific ICMP type. For example
+                          a value of 8 refers to ICMP Echo Request (i.e. pings).
+                        type: integer
+                    type: object
+                  ipVersion:
+                    description: IPVersion is an optional field that restricts the
+                      rule to only match a specific IP version.
+                    type: integer
+                  metadata:
+                    description: Metadata contains additional information for this
+                      rule
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: Annotations is a set of key value pairs that
+                          give extra information about the rule
+                        type: object
+                    type: object
+                  notICMP:
+                    description: NotICMP is the negated version of the ICMP field.
+                    properties:
+                      code:
+                        description: Match on a specific ICMP code. If specified,
+                          the Type value must also be specified. This is a technical
+                          limitation imposed by the kernel's iptables firewall,
+                          which Calico uses to enforce the rule.
+                        type: integer
+                      type:
+                        description: Match on a specific ICMP type. For example
+                          a value of 8 refers to ICMP Echo Request (i.e. pings).
+                        type: integer
+                    type: object
+                  notProtocol:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    description: NotProtocol is the negated version of the Protocol
+                      field.
+                    pattern: ^.*
+                    x-kubernetes-int-or-string: true
+                  protocol:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    description: "Protocol is an optional field that restricts the
+                      rule to only apply to traffic of a specific IP protocol.
Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". 
+ \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
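+                          # Editor's note (illustrative, made-up labels): selector
+                          # and namespaceSelector fields take Calico selector
+                          # expressions, for example:
+                          #   namespaceSelector: team == "platform"
+                          #   selector: app == "frontend" && !has(quarantine)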
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified
+                          on the same rule as Selector, NotSelector, NamespaceSelector,
+                          Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+                          can only be specified with Services on ingress rules."
+                        properties:
+                          name:
+                            description: Name specifies the name of a Kubernetes
+                              Service to match.
+                            type: string
+                          namespace:
+                            description: Namespace specifies the namespace of the
+                              given Service. If left empty, the rule will match
+                              within this policy's namespace.
+                            type: string
+                        type: object
+                    type: object
+                  http:
+                    description: HTTP contains match criteria that apply to HTTP
+                      requests.
+                    properties:
+                      methods:
+                        description: Methods is an optional field that restricts
+                          the rule to apply only to HTTP requests that use one of
+                          the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+                          methods are OR'd together.
+                        items:
+                          type: string
+                        type: array
+                      paths:
+                        description: 'Paths is an optional field that restricts
+                          the rule to apply to HTTP requests that use one of the
+                          listed HTTP Paths. Multiple paths are OR''d together.
+                          e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+                          ONLY specify either a `exact` or a `prefix` match. The
+                          validator will check for it.'
+                        items:
+                          description: 'HTTPPath specifies an HTTP path to match.
+                            It may be either of the form: exact: <path>: which matches
+                            the path exactly or prefix: <path-prefix>: which matches
+                            the path prefix'
+                          properties:
+                            exact:
+                              type: string
+                            prefix:
+                              type: string
+                          type: object
+                        type: array
+                    type: object
+                  icmp:
+                    description: ICMP is an optional field that restricts the rule
+                      to apply to a specific type and code of ICMP traffic. This
+                      should only be specified if the Protocol field is set to "ICMP"
+                      or "ICMPv6".
+                    properties:
+                      code:
+                        description: Match on a specific ICMP code. If specified,
+                          the Type value must also be specified. This is a technical
+                          limitation imposed by the kernel's iptables firewall,
+                          which Calico uses to enforce the rule.
+                        type: integer
+                      type:
+                        description: Match on a specific ICMP type. For example
+                          a value of 8 refers to ICMP Echo Request (i.e. pings).
+                        type: integer
+                    type: object
+                  ipVersion:
+                    description: IPVersion is an optional field that restricts the
+                      rule to only match a specific IP version.
+                    type: integer
+                  metadata:
+                    description: Metadata contains additional information for this
+                      rule
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: Annotations is a set of key value pairs that
+                          give extra information about the rule
+                        type: object
+                    type: object
+                  notICMP:
+                    description: NotICMP is the negated version of the ICMP field.
+                    properties:
+                      code:
+                        description: Match on a specific ICMP code. If specified,
+                          the Type value must also be specified. This is a technical
+                          limitation imposed by the kernel's iptables firewall,
+                          which Calico uses to enforce the rule.
+                        type: integer
+                      type:
+                        description: Match on a specific ICMP type. For example
+                          a value of 8 refers to ICMP Echo Request (i.e. pings).
+                        type: integer
+                    type: object
+                  notProtocol:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    description: NotProtocol is the negated version of the Protocol
+                      field.
+                    pattern: ^.*
+                    x-kubernetes-int-or-string: true
+                  protocol:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    description: "Protocol is an optional field that restricts the
+                      rule to only apply to traffic of a specific IP protocol. Required
+                      if any of the EntityRules contain Ports (because ports only
+                      apply to certain protocols).
\n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." 
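+                          # Editor's note (illustrative sketch, not part of the
+                          # kops-generated manifest): a minimal GlobalNetworkPolicy
+                          # built against this schema; all names and values are
+                          # assumptions.
+                          #
+                          #   apiVersion: crd.projectcalico.org/v1
+                          #   kind: GlobalNetworkPolicy
+                          #   metadata:
+                          #     name: allow-dns-egress
+                          #   spec:
+                          #     order: 100
+                          #     selector: all()
+                          #     types: ["Egress"]
+                          #     egress:
+                          #     - action: Allow
+                          #       protocol: UDP
+                          #       destination:
+                          #         ports: [53]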
+                        type: string
+                      serviceAccounts:
+                        description: ServiceAccounts is an optional field that restricts
+                          the rule to only apply to traffic that originates from
+                          (or terminates at) a pod running as a matching service
+                          account.
+                        properties:
+                          names:
+                            description: Names is an optional field that restricts
+                              the rule to only apply to traffic that originates
+                              from (or terminates at) a pod running as a service
+                              account whose name is in the list.
+                            items:
+                              type: string
+                            type: array
+                          selector:
+                            description: Selector is an optional field that restricts
+                              the rule to only apply to traffic that originates
+                              from (or terminates at) a pod running as a service
+                              account that matches the given label selector. If
+                              both Names and Selector are specified then they are
+                              AND'ed.
+                            type: string
+                        type: object
+                      services:
+                        description: "Services is an optional field that contains
+                          options for matching Kubernetes Services. If specified,
+                          only traffic that originates from or terminates at endpoints
+                          within the selected service(s) will be matched, and only
+                          to/from each endpoint's port. \n Services cannot be specified
+                          on the same rule as Selector, NotSelector, NamespaceSelector,
+                          Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+                          can only be specified with Services on ingress rules."
+                        properties:
+                          name:
+                            description: Name specifies the name of a Kubernetes
+                              Service to match.
+                            type: string
+                          namespace:
+                            description: Namespace specifies the namespace of the
+                              given Service. If left empty, the rule will match
+                              within this policy's namespace.
+                            type: string
+                        type: object
+                    type: object
+                required:
+                - action
+                type: object
+              type: array
+            namespaceSelector:
+              description: NamespaceSelector is an optional field for an expression
+                used to select a pod based on namespaces.
+              type: string
+            order:
+              description: Order is an optional field that specifies the order in
+                which the policy is applied. Policies with higher "order" are applied
+                after those with lower order. If the order is omitted, it may be
+                considered to be "infinite" - i.e. the policy will be applied last. Policies
+                with identical order will be applied in alphanumerical order based
+                on the Policy "Name".
+              type: number
+            preDNAT:
+              description: PreDNAT indicates to apply the rules in this policy before
+                any DNAT.
+              type: boolean
+            selector:
+              description: "The selector is an expression used to pick out
+                the endpoints that the policy should be applied to. \n Selector
+                expressions follow this syntax: \n \tlabel == \"string_literal\"
+                \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
+                \ -> not equal; also matches if label is not present \tlabel in
+                { \"a\", \"b\", \"c\", ... } -> true if the value of label X is
+                one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
+                ... } -> true if the value of label X is not one of \"a\", \"b\",
+                \"c\" \thas(label_name) -> True if that label is present \t! expr
+                -> negation of expr \texpr && expr -> Short-circuit and \texpr
+                || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
+                or the empty selector -> matches all endpoints. \n Label names are
+                allowed to contain alphanumerics, -, _ and /. String literals are
+                more permissive but they do not support escape characters. \n Examples
+                (with made-up labels): \n \ttype == \"webserver\" && deployment
+                == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
+                \"dev\" \t!
has(label_name)" + type: string + serviceAccountSelector: + description: ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. 
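+              # Editor's note (illustrative sketch, not part of the kops-generated
+              # manifest): a GlobalNetworkSet whose labels can then be matched by
+              # policy selectors; the name and CIDRs are assumptions.
+              #
+              #   apiVersion: crd.projectcalico.org/v1
+              #   kind: GlobalNetworkSet
+              #   metadata:
+              #     name: office-ranges
+              #     labels:
+              #       role: office
+              #   spec:
+              #     nets:
+              #     - 192.0.2.0/24
+              #     - 198.51.100.0/24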
+ items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: Ports contains the endpoint's named ports, which may + be referenced in security policy rules. 
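+              # Editor's note (illustrative sketch, not part of the kops-generated
+              # manifest): a HostEndpoint protecting one interface of one node;
+              # the node name, interface and IP are assumptions.
+              #
+              #   apiVersion: crd.projectcalico.org/v1
+              #   kind: HostEndpoint
+              #   metadata:
+              #     name: worker-1.eth0
+              #     labels:
+              #       role: k8s-worker
+              #   spec:
+              #     node: worker-1
+              #     interfaceName: eth0
+              #     expectedIPs:
+              #     - 172.21.8.10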
+              items:
+                properties:
+                  name:
+                    type: string
+                  port:
+                    type: integer
+                  protocol:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    pattern: ^.*
+                    x-kubernetes-int-or-string: true
+                required:
+                - name
+                - port
+                - protocol
+                type: object
+              type: array
+            profiles:
+              description: A list of identifiers of security Profile objects that
+                apply to this endpoint. Each profile is applied in the order that
+                they appear in this list. Profile rules are applied after the selector-based
+                security policy.
+              items:
+                type: string
+              type: array
+          type: object
+        type: object
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+
+---
+
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    addon.kops.k8s.io/name: networking.projectcalico.org
+    app.kubernetes.io/managed-by: kops
+    role.kubernetes.io/networking: "1"
+  name: ipamblocks.crd.projectcalico.org
+spec:
+  group: crd.projectcalico.org
+  names:
+    kind: IPAMBlock
+    listKind: IPAMBlockList
+    plural: ipamblocks
+    singular: ipamblock
+  preserveUnknownFields: false
+  scope: Cluster
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: IPAMBlockSpec contains the specification for an IPAMBlock
+              resource.
+            properties:
+              affinity:
+                description: Affinity of the block, if this block has one. If set,
+                  it will be of the form "host:<hostname>". If not set, this block
+                  is not affine to a host.
+                type: string
+              allocations:
+                description: Array of allocations in-use within this block. nil entries
+                  mean the allocation is free. For non-nil entries at index i, the
+                  index is the ordinal of the allocation within this block and the
+                  value is the index of the associated attributes in the Attributes
+                  array.
+                items:
+                  nullable: true
+                  type: integer
+                type: array
+              attributes:
+                description: Attributes is an array of arbitrary metadata associated
+                  with allocations in the block. To find attributes for a given allocation,
+                  use the value of the allocation's entry in the Allocations array
+                  as the index of the element in this array.
+                items:
+                  properties:
+                    handle_id:
+                      type: string
+                    secondary:
+                      additionalProperties:
+                        type: string
+                      type: object
+                  type: object
+                type: array
+              cidr:
+                description: The block's CIDR.
+                type: string
+              deleted:
+                description: Deleted is an internal boolean used to work around a limitation
+                  in the Kubernetes API whereby deletion will not return a conflict
+                  error if the block has been updated. It should not be set manually.
+                type: boolean
+              sequenceNumber:
+                default: 0
+                description: We store a sequence number that is updated each time
+                  the block is written. Each allocation will also store the sequence
+                  number of the block at the time of its creation.
When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: Unallocated is an ordered list of allocations which are + free in the block. + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. 
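+              # Editor's note (illustrative sketch, not part of the kops-generated
+              # manifest): the cluster-wide IPAMConfig resource, conventionally
+              # named "default"; the maxBlocksPerHost value is an assumption.
+              #
+              #   apiVersion: crd.projectcalico.org/v1
+              #   kind: IPAMConfig
+              #   metadata:
+              #     name: default
+              #   spec:
+              #     autoAllocateBlocks: true
+              #     strictAffinity: false
+              #     maxBlocksPerHost: 4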
+ maximum: 2147483647 + minimum: 0 + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPAMHandleSpec contains the specification for an IPAMHandle + resource. + properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: AllowedUse controls what the IP pool will be used for. 
If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disableBGPExport: + description: 'Disable exporting routes from this IP Pool''s CIDR over + BGP. [Default: false]' + type: boolean + disabled: + description: When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + properties: + enabled: + description: When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: 'Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only.' + type: boolean + natOutgoing: + description: When nat-outgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). + type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]' + type: string + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + syncLabels: + description: 'SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]' + type: string + type: object + policy: + description: Policy enables and configures the policy controller. 
+ Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + serviceAccount: + description: ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + workloadEndpoint: + description: WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]' + type: string + type: object + type: object + debugProfilePort: + description: DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: 'EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]' + type: string + healthChecks: + description: 'HealthChecks enables or disables support for health + checks [Default: Enabled]' + type: string + logSeverityScreen: + description: 'LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]' + type: string + prometheusMetricsPort: + description: 'PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: 9094]' + type: integer + required: + - controllers + type: object + status: + description: KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. + type: object + runningConfig: + description: RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: 'ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]' + type: string + type: object + node: + description: Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: 'AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]' + type: string + type: object + leakGracePeriod: + description: 'LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. 
[Default:
+                              15m]'
+                            type: string
+                          reconcilerPeriod:
+                            description: 'ReconcilerPeriod is the period to perform
+                              reconciliation with the Calico datastore. [Default:
+                              5m]'
+                            type: string
+                          syncLabels:
+                            description: 'SyncLabels controls whether to copy Kubernetes
+                              node labels to Calico nodes. [Default: Enabled]'
+                            type: string
+                        type: object
+                      policy:
+                        description: Policy enables and configures the policy controller.
+                          Enabled by default, set to nil to disable.
+                        properties:
+                          reconcilerPeriod:
+                            description: 'ReconcilerPeriod is the period to perform
+                              reconciliation with the Calico datastore. [Default:
+                              5m]'
+                            type: string
+                        type: object
+                      serviceAccount:
+                        description: ServiceAccount enables and configures the service
+                          account controller. Enabled by default, set to nil to disable.
+                        properties:
+                          reconcilerPeriod:
+                            description: 'ReconcilerPeriod is the period to perform
+                              reconciliation with the Calico datastore. [Default:
+                              5m]'
+                            type: string
+                        type: object
+                      workloadEndpoint:
+                        description: WorkloadEndpoint enables and configures the workload
+                          endpoint controller. Enabled by default, set to nil to disable.
+                        properties:
+                          reconcilerPeriod:
+                            description: 'ReconcilerPeriod is the period to perform
+                              reconciliation with the Calico datastore. [Default:
+                              5m]'
+                            type: string
+                        type: object
+                    type: object
+                  debugProfilePort:
+                    description: DebugProfilePort configures the port to serve memory
+                      and cpu profiles on. If not specified, profiling is disabled.
+                    format: int32
+                    type: integer
+                  etcdV3CompactionPeriod:
+                    description: 'EtcdV3CompactionPeriod is the period between etcdv3
+                      compaction requests. Set to 0 to disable. [Default: 10m]'
+                    type: string
+                  healthChecks:
+                    description: 'HealthChecks enables or disables support for health
+                      checks [Default: Enabled]'
+                    type: string
+                  logSeverityScreen:
+                    description: 'LogSeverityScreen is the log severity above which
+                      logs are sent to the stdout. [Default: Info]'
+                    type: string
+                  prometheusMetricsPort:
+                    description: 'PrometheusMetricsPort is the TCP port that the Prometheus
+                      metrics server should bind to. Set to 0 to disable. [Default:
+                      9094]'
+                    type: integer
+                required:
+                - controllers
+                type: object
+            type: object
+        type: object
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+
+---
+
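For reference, a hedged sketch of the cluster-scoped object this CRD admits is shown below; Calico conventionally expects this singleton to be named "default", and the values shown are the documented defaults from the schema above, not settings read from this cluster:

apiVersion: crd.projectcalico.org/v1
kind: KubeControllersConfiguration
metadata:
  name: default                 # the conventional singleton name
spec:
  controllers:                  # required field
    node:
      reconcilerPeriod: 5m      # documented default
      syncLabels: Enabled       # copy node labels to Calico nodes
      leakGracePeriod: 15m      # IP leak GC grace period
  etcdV3CompactionPeriod: 10m   # documented default
  healthChecks: Enabled
  logSeverityScreen: Info

The diff continues with the NetworkPolicy CRD:

+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    addon.kops.k8s.io/name: networking.projectcalico.org
+    app.kubernetes.io/managed-by: kops
+    role.kubernetes.io/networking: "1"
+  name: networkpolicies.crd.projectcalico.org
+spec:
+  group: crd.projectcalico.org
+  names:
+    kind: NetworkPolicy
+    listKind: NetworkPolicyList
+    plural: networkpolicies
+    singular: networkpolicy
+  preserveUnknownFields: false
+  scope: Namespaced
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase.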
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + egress: + description: The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. 
\n Note that: in
+                            addition to the negated version of the Selector (see NotSelector
+                            below), the selector expression syntax itself supports
+                            negation. The two types of negation are subtly different.
+                            One negates the set of matched endpoints, the other negates
+                            the whole match: \n \tSelector = \"!has(my_label)\" matches
+                            packets that are from other Calico-controlled \tendpoints
+                            that do not have the label \"my_label\". \n \tNotSelector
+                            = \"has(my_label)\" matches packets that are not from
+                            Calico-controlled \tendpoints that do have the label \"my_label\".
+                            \n The effect is that the latter will accept packets from
+                            non-Calico sources whereas the former is limited to packets
+                            from Calico-controlled endpoints."
+                          type: string
+                        serviceAccounts:
+                          description: ServiceAccounts is an optional field that restricts
+                            the rule to only apply to traffic that originates from
+                            (or terminates at) a pod running as a matching service
+                            account.
+                          properties:
+                            names:
+                              description: Names is an optional field that restricts
+                                the rule to only apply to traffic that originates
+                                from (or terminates at) a pod running as a service
+                                account whose name is in the list.
+                              items:
+                                type: string
+                              type: array
+                            selector:
+                              description: Selector is an optional field that restricts
+                                the rule to only apply to traffic that originates
+                                from (or terminates at) a pod running as a service
+                                account that matches the given label selector. If
+                                both Names and Selector are specified then they are
+                                AND'ed.
+                              type: string
+                          type: object
+                        services:
+                          description: "Services is an optional field that contains
+                            options for matching Kubernetes Services. If specified,
+                            only traffic that originates from or terminates at endpoints
+                            within the selected service(s) will be matched, and only
+                            to/from each endpoint's port. \n Services cannot be specified
+                            on the same rule as Selector, NotSelector, NamespaceSelector,
+                            Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+                            can only be specified with Services on ingress rules."
+                          properties:
+                            name:
+                              description: Name specifies the name of a Kubernetes
+                                Service to match.
+                              type: string
+                            namespace:
+                              description: Namespace specifies the namespace of the
+                                given Service. If left empty, the rule will match
+                                within this policy's namespace.
+                              type: string
+                          type: object
+                      type: object
+                    http:
+                      description: HTTP contains match criteria that apply to HTTP
+                        requests.
+                      properties:
+                        methods:
+                          description: Methods is an optional field that restricts
+                            the rule to apply only to HTTP requests that use one of
+                            the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+                            methods are OR'd together.
+                          items:
+                            type: string
+                          type: array
+                        paths:
+                          description: 'Paths is an optional field that restricts
+                            the rule to apply to HTTP requests that use one of the
+                            listed HTTP Paths. Multiple paths are OR''d together.
+                            e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+                            ONLY specify either a `exact` or a `prefix` match. The
+                            validator will check for it.'
+                          items:
+                            description: 'HTTPPath specifies an HTTP path to match.
+                              It may be either of the form: exact: <path>: which matches
+                              the path exactly, or prefix: <path-prefix>: which matches
+                              the path prefix'
+                            properties:
+                              exact:
+                                type: string
+                              prefix:
+                                type: string
+                            type: object
+                          type: array
+                      type: object
+                    icmp:
+                      description: ICMP is an optional field that restricts the rule
+                        to apply to a specific type and code of ICMP traffic. This
+                        should only be specified if the Protocol field is set to "ICMP"
+                        or "ICMPv6".
+ properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. 
Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. 
+                            One negates the set of matched endpoints, the other negates
+                            the whole match: \n \tSelector = \"!has(my_label)\" matches
+                            packets that are from other Calico-controlled \tendpoints
+                            that do not have the label \"my_label\". \n \tNotSelector
+                            = \"has(my_label)\" matches packets that are not from
+                            Calico-controlled \tendpoints that do have the label \"my_label\".
+                            \n The effect is that the latter will accept packets from
+                            non-Calico sources whereas the former is limited to packets
+                            from Calico-controlled endpoints."
+                          type: string
+                        serviceAccounts:
+                          description: ServiceAccounts is an optional field that restricts
+                            the rule to only apply to traffic that originates from
+                            (or terminates at) a pod running as a matching service
+                            account.
+                          properties:
+                            names:
+                              description: Names is an optional field that restricts
+                                the rule to only apply to traffic that originates
+                                from (or terminates at) a pod running as a service
+                                account whose name is in the list.
+                              items:
+                                type: string
+                              type: array
+                            selector:
+                              description: Selector is an optional field that restricts
+                                the rule to only apply to traffic that originates
+                                from (or terminates at) a pod running as a service
+                                account that matches the given label selector. If
+                                both Names and Selector are specified then they are
+                                AND'ed.
+                              type: string
+                          type: object
+                        services:
+                          description: "Services is an optional field that contains
+                            options for matching Kubernetes Services. If specified,
+                            only traffic that originates from or terminates at endpoints
+                            within the selected service(s) will be matched, and only
+                            to/from each endpoint's port. \n Services cannot be specified
+                            on the same rule as Selector, NotSelector, NamespaceSelector,
+                            Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
+                            can only be specified with Services on ingress rules."
+                          properties:
+                            name:
+                              description: Name specifies the name of a Kubernetes
+                                Service to match.
+                              type: string
+                            namespace:
+                              description: Namespace specifies the namespace of the
+                                given Service. If left empty, the rule will match
+                                within this policy's namespace.
+                              type: string
+                          type: object
+                      type: object
+                    http:
+                      description: HTTP contains match criteria that apply to HTTP
+                        requests.
+                      properties:
+                        methods:
+                          description: Methods is an optional field that restricts
+                            the rule to apply only to HTTP requests that use one of
+                            the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
+                            methods are OR'd together.
+                          items:
+                            type: string
+                          type: array
+                        paths:
+                          description: 'Paths is an optional field that restricts
+                            the rule to apply to HTTP requests that use one of the
+                            listed HTTP Paths. Multiple paths are OR''d together.
+                            e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
+                            ONLY specify either a `exact` or a `prefix` match. The
+                            validator will check for it.'
+                          items:
+                            description: 'HTTPPath specifies an HTTP path to match.
+                              It may be either of the form: exact: <path>: which matches
+                              the path exactly, or prefix: <path-prefix>: which matches
+                              the path prefix'
+                            properties:
+                              exact:
+                                type: string
+                              prefix:
+                                type: string
+                            type: object
+                          type: array
+                      type: object
+                    icmp:
+                      description: ICMP is an optional field that restricts the rule
+                        to apply to a specific type and code of ICMP traffic. This
+                        should only be specified if the Protocol field is set to "ICMP"
+                        or "ICMPv6".
+                      properties:
+                        code:
+                          description: Match on a specific ICMP code. If specified,
+                            the Type value must also be specified.
This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". 
+ items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. 
Policies with higher "order" are applied
+                after those with lower order. If the order is omitted, it may be
+                considered to be "infinite" - i.e. the policy will be applied last. Policies
+                with identical order will be applied in alphanumerical order based
+                on the Policy "Name".
+              type: number
+            selector:
+              description: "The selector is an expression used to pick out
+                the endpoints that the policy should be applied to. \n Selector
+                expressions follow this syntax: \n \tlabel == \"string_literal\"
+                \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
+                \ -> not equal; also matches if label is not present \tlabel in
+                { \"a\", \"b\", \"c\", ... } -> true if the value of label X is
+                one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
+                ... } -> true if the value of label X is not one of \"a\", \"b\",
+                \"c\" \thas(label_name) -> True if that label is present \t! expr
+                -> negation of expr \texpr && expr -> Short-circuit and \texpr
+                || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
+                or the empty selector -> matches all endpoints. \n Label names are
+                allowed to contain alphanumerics, -, _ and /. String literals are
+                more permissive but they do not support escape characters. \n Examples
+                (with made-up labels): \n \ttype == \"webserver\" && deployment
+                == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
+                \"dev\" \t! has(label_name)"
+              type: string
+            serviceAccountSelector:
+              description: ServiceAccountSelector is an optional field for an expression
+                used to select a pod based on service accounts.
+              type: string
+            types:
+              description: "Types indicates whether this policy applies to ingress,
+                or to egress, or to both. When not explicitly specified (and so
+                the value on creation is empty or nil), Calico defaults Types according
+                to what Ingress and Egress rules are present in the policy. The default
+                is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including
+                the case where there are also no Ingress rules) \n - [ PolicyTypeEgress
+                ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress,
+                PolicyTypeEgress ], if there are both Ingress and Egress rules.
+                \n When the policy is read back again, Types will always be one
+                of these values, never empty or nil."
+              items:
+                description: PolicyType enumerates the possible values of the PolicySpec
+                  Types field.
+                type: string
+              type: array
+          type: object
+        type: object
+    served: true
+    storage: true
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+
+---
+
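To make the order, selector, and types fields above concrete, a small Calico NetworkPolicy might look like the following sketch; the policy name, labels, and port are illustrative only:

apiVersion: crd.projectcalico.org/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend   # hypothetical policy name
  namespace: default
spec:
  order: 100                        # lower order is evaluated earlier
  selector: app == "backend"        # endpoints this policy applies to
  types:
  - Ingress                         # defaulted from the rules if omitted
  ingress:
  - action: Allow                   # the only required rule field
    protocol: TCP                   # required because ports are matched
    source:
      selector: app == "frontend"
    destination:
      ports:
      - 8080

The diff continues with the NetworkSet CRD:

+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  labels:
+    addon.kops.k8s.io/name: networking.projectcalico.org
+    app.kubernetes.io/managed-by: kops
+    role.kubernetes.io/networking: "1"
+  name: networksets.crd.projectcalico.org
+spec:
+  group: crd.projectcalico.org
+  names:
+    kind: NetworkSet
+    listKind: NetworkSetList
+    plural: networksets
+    singular: networkset
+  preserveUnknownFields: false
+  scope: Namespaced
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet.
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values.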
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ipreservations + verbs: + - list +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - ippools + verbs: + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete +- apiGroups: + - crd.projectcalico.org + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - kubecontrollersconfigurations + verbs: + - get + - create + - update + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node +rules: +- apiGroups: + - "" + resourceNames: + - calico-node + resources: + - serviceaccounts/token + verbs: + - create +- apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - watch + - list +- apiGroups: + - "" + resources: + - endpoints + - services + verbs: + - watch + - list + - get +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - watch + - list +- apiGroups: + - "" + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - patch +- apiGroups: + - crd.projectcalico.org + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch +- 
apiGroups: + - crd.projectcalico.org + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update +- apiGroups: + - crd.projectcalico.org + resources: + - caliconodestatuses + verbs: + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - crd.projectcalico.org + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete +- apiGroups: + - crd.projectcalico.org + resources: + - ipamconfigs + verbs: + - get + - create +- apiGroups: + - crd.projectcalico.org + resources: + - blockaffinities + verbs: + - watch +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-node + role.kubernetes.io/networking: "1" + name: calico-node + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + creationTimestamp: null + labels: + k8s-app: calico-node + kops.k8s.io/managed-by: kops + spec: + containers: + - env: + - name: DATASTORE_TYPE + value: kubernetes + - name: WAIT_FOR_DATASTORE + value: "true" + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + key: calico_backend + name: calico-config + - name: CLUSTER_TYPE + value: kops,bgp + - name: IP + value: autodetect + - name: IP6 + value: none + - name: IP_AUTODETECTION_METHOD + value: first-found + - name: IP6_AUTODETECTION_METHOD + value: none + - name: CALICO_IPV4POOL_IPIP + value: CrossSubnet + - name: CALICO_IPV4POOL_VXLAN + value: Never + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: CALICO_IPV4POOL_CIDR + value: 100.96.0.0/11 + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: ACCEPT + - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + - name: FELIX_AWSSRCDSTCHECK + value: Disable + - name: FELIX_BPFENABLED + value: "false" + - name: FELIX_BPFEXTERNALSERVICEMODE + value: Tunnel + - name: 
FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED + value: "false" + - name: FELIX_BPFLOGLEVEL + value: "Off" + - name: FELIX_CHAININSERTMODE + value: insert + - name: FELIX_IPTABLESBACKEND + value: Auto + - name: FELIX_LOGSEVERITYSCREEN + value: info + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "false" + - name: FELIX_PROMETHEUSMETRICSPORT + value: "9091" + - name: FELIX_PROMETHEUSGOMETRICSENABLED + value: "false" + - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED + value: "false" + - name: FELIX_WIREGUARDENABLED + value: "false" + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: docker.io/calico/node:v3.24.1@sha256:43f6cee5ca002505ea142b3821a76d585aa0c8d22bc58b7e48589ca7deb48c13 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + failureThreshold: 6 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + name: calico-node + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + resources: + requests: + cpu: 100m + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - mountPath: /var/run/nodeagent + name: policysync + - mountPath: /sys/fs/bpf + name: bpffs + - mountPath: /var/log/calico/cni + name: cni-log-dir + readOnly: true + hostNetwork: true + initContainers: + - command: + - /opt/cni/bin/calico-ipam + - -upgrade + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + key: calico_backend + name: calico-config + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: docker.io/calico/cni:v3.24.1@sha256:e60b90d7861e872efa720ead575008bc6eca7bee41656735dcaa8210b688fcd9 + imagePullPolicy: IfNotPresent + name: upgrade-ipam + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - command: + - /opt/cni/bin/install + env: + - name: CNI_CONF_NAME + value: 10-calico.conflist + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + key: cni_network_config + name: calico-config + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CNI_MTU + valueFrom: + configMapKeyRef: + key: veth_mtu + name: calico-config + - name: SLEEP + value: "false" + envFrom: + - configMapRef: + name: kubernetes-services-endpoint + optional: true + image: docker.io/calico/cni:v3.24.1@sha256:e60b90d7861e872efa720ead575008bc6eca7bee41656735dcaa8210b688fcd9 + imagePullPolicy: IfNotPresent + name: install-cni + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - command: + - calico-node + - -init + - -best-effort + image: docker.io/calico/node:v3.24.1@sha256:43f6cee5ca002505ea142b3821a76d585aa0c8d22bc58b7e48589ca7deb48c13 + imagePullPolicy: IfNotPresent + name: mount-bpffs + securityContext: + privileged: 
true + volumeMounts: + - mountPath: /sys/fs + mountPropagation: Bidirectional + name: sys-fs + - mountPath: /var/run/calico + mountPropagation: Bidirectional + name: var-run-calico + - mountPath: /nodeproc + name: nodeproc + readOnly: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: calico-node + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /var/run/calico + name: var-run-calico + - hostPath: + path: /var/lib/calico + name: var-lib-calico + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + name: sys-fs + - hostPath: + path: /sys/fs/bpf + type: Directory + name: bpffs + - hostPath: + path: /proc + name: nodeproc + - hostPath: + path: /opt/cni/bin + name: cni-bin-dir + - hostPath: + path: /etc/cni/net.d + name: cni-net-dir + - hostPath: + path: /var/log/calico/cni + name: cni-log-dir + - hostPath: + path: /var/lib/cni/networks + name: host-local-net-dir + - hostPath: + path: /var/run/nodeagent + type: DirectoryOrCreate + name: policysync + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.projectcalico.org + app.kubernetes.io/managed-by: kops + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + name: calico-kube-controllers + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: calico-kube-controllers + kops.k8s.io/managed-by: kops + name: calico-kube-controllers + namespace: kube-system + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + image: docker.io/calico/kube-controllers:v3.24.1@sha256:4010b2739792ae5e77a750be909939c0a0a372e378f3c81020754efcf4a91efa + imagePullPolicy: IfNotPresent + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + failureThreshold: 6 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + name: calico-kube-controllers + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 + priorityClassName: system-cluster-critical + serviceAccountName: calico-kube-controllers + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 
0000000..4e8a971 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,118 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-csi-1-21 +parameters: + encrypted: "true" + type: gp3 +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_etcd-cluster-spec-events_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000..c130130 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 3, + "etcdVersion": "3.5.4" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_etcd-cluster-spec-main_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000..c130130 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 3, + 
"etcdVersion": "3.5.4" +} \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_kops-version.txt_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000..f2538f8 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.25.0 \ No newline at end of file diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2a_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2a_content new file mode 100644 index 0000000..e80a302 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2a_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831@sha256:a91fdaf9b988943a9c73d422348c2383c08dfc2566d4124a39a1b3d785018720 + name: etcd-manager + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2b_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2b_content new file mode 100644 index 0000000..e80a302 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2b_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 
--peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831@sha256:a91fdaf9b988943a9c73d422348c2383c08dfc2566d4124a39a1b3d785018720 + name: etcd-manager + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2c_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2c_content new file mode 100644 index 0000000..e80a302 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2c_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831@sha256:a91fdaf9b988943a9c73d422348c2383c08dfc2566d4124a39a1b3d785018720 + name: etcd-manager + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2a_content 
b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2a_content new file mode 100644 index 0000000..dc8c171 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2a_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831@sha256:a91fdaf9b988943a9c73d422348c2383c08dfc2566d4124a39a1b3d785018720 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2b_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2b_content new file mode 100644 index 0000000..dc8c171 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2b_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831@sha256:a91fdaf9b988943a9c73d422348c2383c08dfc2566d4124a39a1b3d785018720 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + 
name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2c_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2c_content new file mode 100644 index 0000000..dc8c171 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2c_content @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=s3://clusters.dev.datasaker.io/dev.datasaker.io/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.dev.datasaker.io --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 + --volume-tag=kubernetes.io/cluster/dev.datasaker.io=owned > /tmp/pipe 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831@sha256:a91fdaf9b988943a9c73d422348c2383c08dfc2566d4124a39a1b3d785018720 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000..7d66233 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.25.0@sha256:3a9fc2225bedd410645becec263d8e25c0f978406d46c54837422db978b8505a + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 
3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-a_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-a_content new file mode 100644 index 0000000..39e1c27 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-a_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- 
null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: data-druid + kops.k8s.io/instancegroup: dev-data-druid-a + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/data-druid:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-b_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-b_content new file mode 100644 index 0000000..cc7452c --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-b_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - 
ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: data-druid + kops.k8s.io/instancegroup: dev-data-druid-b + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/data-druid:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 
1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-c_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-c_content new file mode 100644 index 0000000..0c9d046 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-druid-c_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: 
memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: data-druid + kops.k8s.io/instancegroup: dev-data-druid-c + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/data-druid:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-a_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-a_content new file mode 100644 index 0000000..7f9995f --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-a_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - 
dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: data-kafka + kops.k8s.io/instancegroup: dev-data-kafka-a + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/data-kafka:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-b_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-b_content new file mode 100644 index 0000000..b1ab5bc --- /dev/null +++ 
b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-b_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: data-kafka + kops.k8s.io/instancegroup: dev-data-kafka-b + 
node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/data-kafka:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-c_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-c_content new file mode 100644 index 0000000..ee14f7c --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-data-kafka-c_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + 
AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: data-kafka + kops.k8s.io/instancegroup: dev-data-kafka-c + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/data-kafka:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content new file mode 100644 index 0000000..f9e52b2 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 
962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-a + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/mgmt:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml 
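The per-group taints and nodeLabels in these nodeup configs (here dev/mgmt:NoSchedule and datasaker/group: mgmt) only take effect once a workload opts in with a matching toleration and node selector. A minimal sketch of that pairing, where the Deployment name and image are placeholders and not part of the generated artifact:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-mgmt-workload        # hypothetical name, for illustration only
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-mgmt-workload
  template:
    metadata:
      labels:
        app: example-mgmt-workload
    spec:
      nodeSelector:
        datasaker/group: mgmt        # matches the nodeLabels set in this nodeup config
      tolerations:
      - key: dev/mgmt                # matches the dev/mgmt:NoSchedule taint above
        operator: Exists
        effect: NoSchedule
      containers:
      - name: app
        image: busybox:1.36          # placeholder image
        command: ["sleep", "infinity"]

Without the toleration the pod is repelled by these nodes; without the nodeSelector it may also land on other, untainted groups.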
+containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content new file mode 100644 index 0000000..c1cee3d --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + 
hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-b + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/mgmt:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-c_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-c_content new file mode 100644 index 0000000..542a2cf --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-mgmt-c_content @@ -0,0 +1,89 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - 
c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: mgmt + kops.k8s.io/instancegroup: dev-mgmt-c + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - dev/mgmt:NoSchedule +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + 
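        # Editorial annotation (not emitted by kops): SystemdCgroup = true below keeps
        # containerd on the systemd cgroup driver, matching cgroupDriver: systemd in the
        # KubeletConfig above; mismatched cgroup drivers between kubelet and the runtime
        # are a common cause of node-level instability.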
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-a_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-a_content new file mode 100644 index 0000000..339bdcb --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-a_content @@ -0,0 +1,87 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + 
kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-a + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-b_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-b_content new file mode 100644 index 0000000..3510897 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-b_content @@ -0,0 +1,87 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 
b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + datasaker/group: process + kops.k8s.io/instancegroup: dev-process-b + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-c_content 
b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-c_content new file mode 100644 index 0000000..e8005db --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-dev-process-c_content @@ -0,0 +1,87 @@ +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 +CAs: + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "7142721951268583043543051771" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + 
  kubeconfigPath: /var/lib/kubelet/kubeconfig
+  logLevel: 2
+  nodeLabels:
+    datasaker/group: process
+    kops.k8s.io/instancegroup: dev-process-c
+    node-role.kubernetes.io/node: ""
+  podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
+  podManifestPath: /etc/kubernetes/manifests
+  protectKernelDefaults: true
+  shutdownGracePeriod: 30s
+  shutdownGracePeriodCriticalPods: 10s
+UpdatePolicy: automatic
+channels:
+- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
+containerdConfig:
+  configOverride: |
+    version = 2
+    imports = ["/etc/containerd/runtime_*.toml"]
+
+    [plugins]
+    [plugins."io.containerd.grpc.v1.cri"]
+    sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db"
+    [plugins."io.containerd.grpc.v1.cri".containerd]
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+    runtime_type = "io.containerd.runc.v2"
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+    SystemdCgroup = true
+    [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth]
+    username = "datasaker"
+    password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc"
+  logLevel: info
+  runc:
+    version: 1.1.4
+  version: 1.6.8
+useInstanceIDForNodeName: true
diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content
new file mode 100644
index 0000000..b40b701
--- /dev/null
+++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content
@@ -0,0 +1,288 @@
+APIServerConfig:
+  KubeAPIServer:
+    allowPrivileged: true
+    anonymousAuth: false
+    apiAudiences:
+    - kubernetes.svc.default
+    apiServerCount: 3
+    authorizationMode: Node,RBAC
+    bindAddress: 0.0.0.0
+    cloudProvider: external
+    enableAdmissionPlugins:
+    - NamespaceLifecycle
+    - LimitRanger
+    - ServiceAccount
+    - DefaultStorageClass
+    - DefaultTolerationSeconds
+    - MutatingAdmissionWebhook
+    - ValidatingAdmissionWebhook
+    - NodeRestriction
+    - ResourceQuota
+    etcdServers:
+    - https://127.0.0.1:4001
+    etcdServersOverrides:
+    - /events#https://127.0.0.1:4002
+    featureGates:
+      CSIMigrationAWS: "true"
+      InTreePluginAWSUnregister: "true"
+    image: registry.k8s.io/kube-apiserver:v1.25.2@sha256:86e7b79379dddf58d7b7189d02ca96cc7e07d18efa4eb42adcaa4cf94531b96e
+    kubeletPreferredAddressTypes:
+    - InternalIP
+    - Hostname
+    - ExternalIP
+    logLevel: 2
+    requestheaderAllowedNames:
+    - aggregator
+    requestheaderExtraHeaderPrefixes:
+    - X-Remote-Extra-
+    requestheaderGroupHeaders:
+    - X-Remote-Group
+    requestheaderUsernameHeaders:
+    - X-Remote-User
+    securePort: 443
+    serviceAccountIssuer: https://api.internal.dev.datasaker.io
+    serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
+    serviceClusterIPRange: 100.64.0.0/13
+    storageBackend: etcd3
+  ServiceAccountPublicKeys: |
+    -----BEGIN RSA PUBLIC KEY-----
+    MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS
+    UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5
+    NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec
+    E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf
+    DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU
+    fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp
+    vwIDAQAB
+    -----END RSA PUBLIC KEY-----
+Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + - 8f01da0b6bf77d36b28840186c5387ecda831036d0d671805b6a5367bdd1b284@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.25.0/protokube-linux-amd64 + - 7ba778d62bbca3ec158c62279713ef774f695f341e264ea572a0b7cbdd022071@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.25.0/channels-linux-amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 + - dead6a79da3a04785c25b03fef625b3d220bf77e2b0750b525023c48a70f4081@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.25.0/protokube-linux-arm64 + - 609d23833768046d3626eba1c8dd620ce86d7235bbe3073f4c6241f26c31e456@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.25.0/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX + DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/ + wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG + yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G + VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5 + db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0 + xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt + Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA + Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4 + wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr + anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd + 
wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob + PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O + ILB6F+I/dTp9EdI/qBNrqg== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw + NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4 + L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf + A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1 + ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ + ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe + 1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB + n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8 + sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN + FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa + xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm + eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z + jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN + MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO + uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW + GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm + lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie + tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY + GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ + +QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C + vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp + fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh + xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX + cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB + jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS + XOkBupErXPmZkj/8CEk= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO + 4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F + 5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV + mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn + l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l + /Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4 + gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD + qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5 + 9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77 + 
T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc + zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0 + Z2RjK1J0AFawFQ== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB + dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR + IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4 + 5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ + KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D + WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy + hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy + 55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI + vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n + n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ + 1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0 + u1zeRMpGpRYUtA== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5 + MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod + F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B + Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA + P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I + Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z + 2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi + L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut + ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ + EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5 + ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n + ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp + 1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb + Ge9OTWOZ + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + 
jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta2 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7142721951056419283723637893" + etcd-clients-ca: "7142721951028761584467841211" + etcd-manager-ca-events: "7142721951057921435241405328" + etcd-manager-ca-main: "7142721951074386633614158554" + etcd-peers-ca-events: "7142721951138880539659455124" + etcd-peers-ca-main: "7142721951056140991529806803" + kubernetes-ca: "7142721951268583043543051771" + service-account: "7142721951191621691964241737" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2a + kops.k8s.io/kops-controller-pki: "" + node-role.kubernetes.io/control-plane: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main-master-ap-northeast-2a.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events-master-ap-northeast-2a.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content new file mode 100644 index 0000000..f70ebe1 --- /dev/null +++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content @@ -0,0 +1,288 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true 
+ anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 3 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: external + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + image: registry.k8s.io/kube-apiserver:v1.25.2@sha256:86e7b79379dddf58d7b7189d02ca96cc7e07d18efa4eb42adcaa4cf94531b96e + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.dev.datasaker.io + serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS + UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5 + NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec + E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf + DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU + fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp + vwIDAQAB + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + - 8f01da0b6bf77d36b28840186c5387ecda831036d0d671805b6a5367bdd1b284@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.25.0/protokube-linux-amd64 + - 7ba778d62bbca3ec158c62279713ef774f695f341e264ea572a0b7cbdd022071@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.25.0/channels-linux-amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - 
b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 + - dead6a79da3a04785c25b03fef625b3d220bf77e2b0750b525023c48a70f4081@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.25.0/protokube-linux-arm64 + - 609d23833768046d3626eba1c8dd620ce86d7235bbe3073f4c6241f26c31e456@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.25.0/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX + DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/ + wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG + yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G + VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5 + db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0 + xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt + Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA + Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4 + wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr + anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd + wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob + PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O + ILB6F+I/dTp9EdI/qBNrqg== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw + NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4 + L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf + A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1 + ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ + ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe + 1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB + n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8 + sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN + FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa + xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm + eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z + jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN + MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO + uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW + GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm + 
lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie + tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY + GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ + +QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C + vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp + fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh + xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX + cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB + jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS + XOkBupErXPmZkj/8CEk= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO + 4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F + 5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV + mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn + l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l + /Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4 + gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD + qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5 + 9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77 + T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc + zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0 + Z2RjK1J0AFawFQ== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB + dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR + IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4 + 5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ + KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D + WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy + hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy + 55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI + vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n + n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ + 1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0 + u1zeRMpGpRYUtA== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5 + MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod + F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B + Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA + 
P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I + Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z + 2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi + L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut + ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ + EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5 + ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n + ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp + 1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb + Ge9OTWOZ + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta2 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7142721951056419283723637893" + etcd-clients-ca: "7142721951028761584467841211" + etcd-manager-ca-events: "7142721951057921435241405328" + etcd-manager-ca-main: "7142721951074386633614158554" + etcd-peers-ca-events: "7142721951138880539659455124" + etcd-peers-ca-main: "7142721951056140991529806803" + kubernetes-ca: "7142721951268583043543051771" + service-account: "7142721951191621691964241737" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2b + kops.k8s.io/kops-controller-pki: "" + node-role.kubernetes.io/control-plane: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: 
automatic
+channels:
+- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml
+containerdConfig:
+  configOverride: |
+    version = 2
+    imports = ["/etc/containerd/runtime_*.toml"]
+
+    [plugins]
+    [plugins."io.containerd.grpc.v1.cri"]
+    sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db"
+    [plugins."io.containerd.grpc.v1.cri".containerd]
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+    runtime_type = "io.containerd.runc.v2"
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+    SystemdCgroup = true
+    [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth]
+    username = "datasaker"
+    password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc"
+  logLevel: info
+  runc:
+    version: 1.1.4
+  version: 1.6.8
+etcdManifests:
+- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main-master-ap-northeast-2b.yaml
+- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events-master-ap-northeast-2b.yaml
+staticManifests:
+- key: kube-apiserver-healthcheck
+  path: manifests/static/kube-apiserver-healthcheck.yaml
+useInstanceIDForNodeName: true
diff --git a/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content
new file mode 100644
index 0000000..fa16e2a
--- /dev/null
+++ b/terraform/tf-kops-dev-20221025/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content
@@ -0,0 +1,288 @@
+APIServerConfig:
+  KubeAPIServer:
+    allowPrivileged: true
+    anonymousAuth: false
+    apiAudiences:
+    - kubernetes.svc.default
+    apiServerCount: 3
+    authorizationMode: Node,RBAC
+    bindAddress: 0.0.0.0
+    cloudProvider: external
+    enableAdmissionPlugins:
+    - NamespaceLifecycle
+    - LimitRanger
+    - ServiceAccount
+    - DefaultStorageClass
+    - DefaultTolerationSeconds
+    - MutatingAdmissionWebhook
+    - ValidatingAdmissionWebhook
+    - NodeRestriction
+    - ResourceQuota
+    etcdServers:
+    - https://127.0.0.1:4001
+    etcdServersOverrides:
+    - /events#https://127.0.0.1:4002
+    featureGates:
+      CSIMigrationAWS: "true"
+      InTreePluginAWSUnregister: "true"
+    image: registry.k8s.io/kube-apiserver:v1.25.2@sha256:86e7b79379dddf58d7b7189d02ca96cc7e07d18efa4eb42adcaa4cf94531b96e
+    kubeletPreferredAddressTypes:
+    - InternalIP
+    - Hostname
+    - ExternalIP
+    logLevel: 2
+    requestheaderAllowedNames:
+    - aggregator
+    requestheaderExtraHeaderPrefixes:
+    - X-Remote-Extra-
+    requestheaderGroupHeaders:
+    - X-Remote-Group
+    requestheaderUsernameHeaders:
+    - X-Remote-User
+    securePort: 443
+    serviceAccountIssuer: https://api.internal.dev.datasaker.io
+    serviceAccountJWKSURI: https://api.internal.dev.datasaker.io/openid/v1/jwks
+    serviceClusterIPRange: 100.64.0.0/13
+    storageBackend: etcd3
+  ServiceAccountPublicKeys: |
+    -----BEGIN RSA PUBLIC KEY-----
+    MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4UK3R2fjYWGtlIJU3nBS
+    UTIX9Eg+vp9Uw4zMhkz1K5BnyB2IsKR0F9LnMdLaTrF7Zo1Bef82Ew80eKS0JwY5
+    NOj+ZP9FiC7bVRRdeuW5KMGjEmhWSz/mVahxgo0pRE9xP3yA2Ij1lQjn3R0Yr6ec
+    E+fwjAF2o93L+KpBzcXrpGiPa0+Qx1I8VPKLyLjM/SfK3eBUcouNbWeGi8+DULAf
+    DHMUA7B6U+w/IbEd3kVCTSWEBK+R2CAl8sIMZ424wGnNX58G4yy2uGYlcOItTZzU
+    fPt9ulI1DYvycFTkPzedFu+KF5GlulcqMqmPRANWDSj26gDmahVoraO0eQ9vCDhp
+    vwIDAQAB
+    -----END RSA PUBLIC KEY-----
+Assets:
+  amd64:
+  - 
631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubelet + - 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/amd64/kubectl + - 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz + - 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz + - db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 + - 8f01da0b6bf77d36b28840186c5387ecda831036d0d671805b6a5367bdd1b284@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.25.0/protokube-linux-amd64 + - 7ba778d62bbca3ec158c62279713ef774f695f341e264ea572a0b7cbdd022071@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.25.0/channels-linux-amd64 + arm64: + - c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubelet + - b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5@https://storage.googleapis.com/kubernetes-release/release/v1.25.2/bin/linux/arm64/kubectl + - ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz + - b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd@https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-arm64.tar.gz + - dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64 + - dead6a79da3a04785c25b03fef625b3d220bf77e2b0750b525023c48a70f4081@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.25.0/protokube-linux-arm64 + - 609d23833768046d3626eba1c8dd620ce86d7235bbe3073f4c6241f26c31e456@https://artifacts.k8s.io/binaries/kops/1.25.0/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.25.0/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIDDDCCAfSgAwIBAgIMFxRSNNb6vi6f8FSFMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIyMDkxMTA0NDkwOVoX + DTMyMDkxMDA0NDkwOVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2CwCYipJHeykxywc/ + wcAZQzTt49XYDHsTnMPtdSkF4Qdy+cwRi1SpL5cpO9ByqGwZ7exXKhe6EAOhfmmG + yZgDvI95434tp6a64mbBmCrR+4NIKDIkoXIrhEGogbJlDij/K63yVCAZCPulyj7G + VyE7X4bEmvuAbYDeJheX+ZFGhV5iLS2fri13NMEp9a9nms22V9hJitLxzV3LLdl5 + db/q3LMb96xl27ccbcSyz5gEuKJfvKqEb7bCVg6yJbdbVO+CMLpnIMFsiXwwSyO0 + xXrCzyeNHAB9eK/n0gGkWb/RKoLqXTUNdGu4SvaPYnTJKAT2eHvBNAlPt5rJO5Kt + Yz4xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBT1GhQw65WfbiDWTeUx5k1xHMz/ajANBgkqhkiG9w0BAQsFAAOCAQEA + Uih4ajNq0Yys9IFBziOT+W2rxdodQOzcJpXWTdSNnxRzOtasjiYUoGdzdizT54Y4 + wjtWnBGgB+sre3pTF8TNnv/AlBLx8t0ANOifcncPLRFsBtJVDFCuglPXrn5cHDOr + anLTIzQ3etoDV/h2AQxQafYUg9ZtwgyEbou7kwLi+p9TBJdV3iWowfdgs9HtHagd + wL0/v6RU8pojl7hBYIloGB1AIREDSfprxDMzUBDyOY7uyvcfK+RcUoLRuq6Tq2ob + 
PsOtl3ZaSTOmdQ0r8SEUMtOm0jozbyRu9ojq7/+UOu3yT1YeM4M7N6lYNtZx153O + ILB6F+I/dTp9EdI/qBNrqg== + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIC/DCCAeSgAwIBAgIMFxRSNNaYe6a0fhC7MA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5MTAw + NDQ5MDlaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAJdTYAp2rgiShljdkdR/P1kt81okDYl1q+/6rUS4 + L8AwJDtbIIvQcmgRgoR3mlhRBQIibeHSWHNlt99TYzkUeQF8n2cE3MJbSNmykGqf + A8CxluTyL32TDnsRbonQoDK5wKbWpCFD1KD7P/aozOdsoDlPV18Y46dZ4j3Yv2C1 + ppaUmv0hQ62eLeDXQlq1e7VFmwiij/lsW/bNXI6r/ENFRbCsfhCCY5xkoOeWPrFJ + ci68UbzQssmR0xlcGbCtcxfwmsPi0C9Php5mtpmRWa9uTGbSK3ZD1jx98S2OWWVe + 1jiCmIyzsqY31QioOveWaCL14JqArO2FqrugXx2ZxAI1OSkCAwEAAaNCMEAwDgYD + VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA4wbacZ59AB + n3dc7WLWkb9TF+CUMA0GCSqGSIb3DQEBCwUAA4IBAQBQn+1DUIZOkgTwUmW3Nnt8 + sWUV7NRy3ZdB9lEbWWwodNRheYMEHUe8y/Z2VvWiYNKA9K0lVYpu0MGF6HiClqhN + FWU7eFv6uVGf2ypBNTy5cz+PNYAfxl9U4gBGJRKzuKOICFHp7laKzBuiwk934Daa + xeZeA+7Pt23o52APhXVXTKf3U5v/97e631rOfnE+o9D6mL3XnWj5vZ4/1moQD1nm + eyRJXT1LaKULk52o52c4O6FIgniit746qyakIllhUk5vMsnlXTjO2v16iyi2i62z + jhx8pJzZ2phPBcSjDR+Bm4WbAKvZjAUFQ6MjgqXxxTDtGy52erAzXmjLeqBsHrvi + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDCjCCAfKgAwIBAgIMFxRSNNcAFGGHjduQMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjIwOTExMDQ0OTA5WhcN + MzIwOTEwMDQ0OTA5WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3KRK8906XyxKwYZcISZO + uEYgQ2WGAZNQXgvxbb5GBAM4f9Pv0JuoAL0uy9qpyqQDq6ACe5jICyvg3+9LU+pW + GDxubYHb6f15BJtw36zO6Mgs5BTjrW9zxjJSzZIoGDL7zw+d7B7bASAfuIWZfmmm + lMQg/pnywbG1jPTB1rEVOryOHMXntXe6C/CpxTZz66AYYd6+7GrCLC8uHG5PyEie + tv7avgRb06RKJQSJ3reGRHJ8UI9bJduTlaQyZpCmfxpqnK7E57SFSuzbcYi/iMGY + GUZCfR8tLtsMjDYTxsTCvBQWuVP3FJXS1KKoyfgfQ4AvNhzo/I5K9ZGGb24CvtzZ + +QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQU0pBv8lYo6UyaXEX7P7KPMEIll1kwDQYJKoZIhvcNAQELBQADggEBAG7C + vDSF0dAEyThlrhzpUBZX6dLWwRtsGqXmsqS7TTvSExiDxl+27llAVVb6DIg5b3Lp + fa4P5cDcflFaNsWz/vCwkB9yoiUm2tCqxRkr1LKY9FIV/FUGwE5imr7HyGmpcbKh + xCC+57ZHXuZj7oZsBoTyCVjj+PX6UmqsTMG6GEOuvDvrzqKI1h3WSMtovRjLUmCX + cPrwOJJoKzy1gWCNsILSwFmSyklsjIzVFliXp+Si0IHwHwqmVn9JEnz64A5C5nkB + jBOFXTznDiPWOmNc2RYumSpNl0srm5fqR9FA21H4DOJI4VmpK8YWwSmwNmmwAZoS + XOkBupErXPmZkj/8CEk= + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNc6k2RDt+raMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChc/xsdUXOfkMw/TiONzQ5ymzO + 4i7seuBYgbBriR1n0PyCFoAmNXMvVt+JtytvBzr0FfPnpjpO+xb+L1MY2m7Rbx4F + 5brrJN1LwFlZOQjKCpgxOUT+EFVneXvmZx7E0UbJ+TxEGGOZ1N6t1mxdmsdjO0TV + mhMg6Nawj1+HAQsdgkMDAWv3PEgUeJCrRg+7KzBQxY0pOVuZkeQZ+MHsR3GLdIZn + l3h13ePS6Z1K+Uz4VMR4myV1wXFyOR1Qms7ROZ3wIiCoE/Vqg9bn70funi4PMG0l + /Bxj9t2ogMOla7ypNzcwjNRtzhdmuAaEvdrvZ6XF4NXWM8DpjiR9dA3Y0dffAgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBTD5SaTxIzni41qVldUtl9SqcBM7TANBgkqhkiG9w0BAQsFAAOCAQEANyBp0bU4 + gJrgLLnjKC/atTvHRzCrLmHz9ECa9bN9I6dAsiyJmxGPPlD+PkhFNxzenQ80VizD + qo+w9RQGDtfMD5WX0A8M4KN5A8efTBhWReI9lzxGaRxUwQRiKXBRgn778nFZ7E/5 + 9DmDlibhdb1XEz0X+l6XkNyJdHHsCPi2omKRY6R9W7+/ezvkH6mqAcTC7DufWB77 + T3sr6lmFR69isQB0kQlhXG/Ws+g6zN7CyRP741sQAPWYfRaziLYSTcdnFHMBNRHc + 
zm3DVnbPCrjV7zjSdoNbPgPvEvZYGMSnK0tfxhYKTVRT8cKWlBBwnPYMKW/O0ED0 + Z2RjK1J0AFawFQ== + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIMFxRSNNgftEHrucqUMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIyMDkxMTA0NDkwOVoXDTMy + MDkxMDA0NDkwOVowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwggEi + MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAA6jobVkkVeddp9oTaVMbthfB + dforGm4J/E3KBBmA5+3HXknFZ+nXAK0naZUS2RrHUrigTcux1no1Om3eTJCcxmOR + IIFYAjX3vpMXhOMCgh98U/BrN96xdaRPRNF5lwluc26ZLRcS7Y+HeZwORCB0auX4 + 5XZFb72CT2kfWaqnsum7YC/r/aJzUS1dIrGZwKBYCZct3TfCZTzW4aL6rkHdrriJ + KNIaV1FR/n6X2hdTpVnHou/mk5Zr0WYz1YaAlJIqHJEavrYIjLp6pWgsho8ESB+D + WHEm+cHNVFMuVm++5OWr5PZNLawD44MUomH/DlTVK0B9qdS3gQ6X4Hx6gDS3AgMB + AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBRozlb1pjT7aWt9Kg70JkqBH6y4BzANBgkqhkiG9w0BAQsFAAOCAQEApP3tYKOy + hy2AGVeTOfh5YSKuSQQJjyy5mBuHIpB0vYcukSABn+60n7Ku4hAGERucscBjHpWy + 55BBRDjVvY1jlB4AJKRmlAlGngmwhz9KO86EvxXzJaDfxd92rDY1iOF3DM9UNUCI + vlvVA1ws7XhWLlUPZf+Ndpj7s1ar46htDy0ONchhXiokzNcDqNtMgSZzS1+WJY+n + n5BjbIO91sQqLsd4DHLVi9ZWcr4LyS9hYSFPSNAPOnNsGnj3WcWTcctH8yUxhzwZ + 1Cty74gyfTtTENm5dZk+wAjkxTkixO+18NG0PCXos/1FONthR521u3qqLXSZNYL0 + u1zeRMpGpRYUtA== + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIDAjCCAeqgAwIBAgIMFxRSNNb5wROslOvTMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMjA5MTEwNDQ5MDlaFw0zMjA5 + MTAwNDQ5MDlaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMN1BKqeJVUBLg1gS8GIZzld/MG8Xgod + F4DQKxYYVI9mmkEpP5nhesYQ8qnnqW6js9URF5GXUoaeiaM/krigc4yYm7YRts7B + Lzbd6Mlfo8LaHX5GXE0xHRcW29NmaGq8UbcEmTTxc5EgbBNS/Tfai71HGaO0VmrA + P6SbNMrgSAlfap1caLQ8CcUASDqEf+BcjZhgetddqSL2KLkL5ot7IxOS2blzQH/I + Jk/2Boi36yQ5JoLPbs/TRAV4wHMci3B9ZNHQrdcqP2zl0zC64eNt5fNgo+F/iH/z + 2M32O+V3HpOJDvFtSC+Q9Ux3kOC4/dmembZex8IPAGJ4IfCyL3cwJYUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMpi + L3tJgzuP+QDY3uyx99aMAB0sMA0GCSqGSIb3DQEBCwUAA4IBAQCO1OS0DYntM4ut + ZNZIkJA+SAFKy06IAev3o9wBiOzlIM5rVm4TDa0L7qFH/Z2l9bRmWDqDeba281qZ + EIFGJI1QPAWX47RbQXJOTOIiGsNoUw4swt6it+NoemARwZAoGPYOXqXLVknXalR5 + ye33OaoI0EowrHw01sv72mbEqeWhb9XKw3h1UkbfdkZIG9KiftYVAlPUNUSaSy8n + ApKbqEw2CcRjSPjeLeS9zbLSj+M20NYlwU56xaxIm64TRk65Ac17PN5KJiOHYuDp + 1fnHqnbPbOOMdfhuRU1D48sSZlAKFiR3p0vLkSNwfmJmWRTfWuAUNAA339CRTKOb + Ge9OTWOZ + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIC+DCCAeCgAwIBAgIMFxRSNNnsf/9iL637MA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjIwOTExMDQ0OTA5WhcNMzIwOTEwMDQ0 + OTA5WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMIIBIjANBgkqhkiG9w0BAQEF + AAOCAQ8AMIIBCgKCAQEAuWlsK26NCl/z8mUJ0hVq8a6CxuhhZO76ZKxza4gjpNSZ + hrnC1kyQed8zjDln2APE20OE2or6nEWmjWWZJkr3wToQygFDj/5SuL4WwF1V2Fcz + iaHcLz9oFva/EgJfWgZ/W/aaXWJRNsFVN8CAt1Z43wEZwmbKjykQ83IUIng3/z3t + /eRAx1wc+3ahMqbZD7hOCihCKbaNc3FGzPOvu/1AC/6TyxV/nwqfaroW5MbC3/Dt + UmrZJk5titRTG8aU9i7ZviMLAuHd8nZBzjIeqp95AdAv6nMVV9RveRz64Yip/B4Z + h0GMczJ8VmXQN8Dq6xz4eE7Hx0962Y+xQklEan1vfQIDAQABo0IwQDAOBgNVHQ8B + Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUa5SJcwBuRj6rJ2OL + hEsmhX/nGcQwDQYJKoZIhvcNAQELBQADggEBABtVPrhKPIFuPM8uQbQHGC2AV7Ap + afIKWHx2gXtf3uDfBe52Xa2WI/nExnQE+MM0iFU3Bgq1aYe9/rJmkptZJuD6vfkz + VWhuIGGkyxYSxsuBo8UYdzsWpSzP8dH3Mip3PNNS3F3bcU3z5uufuOZUmdIn+NPS + qgBgxpqCVy/KzRVp30sM4uj9i6bXB/CioE1oUssPchrB8uWV+THZEfle1TSgK9sg + jFx1R0mqhxH/eW6UsHJPgf14mdjEGiimaamvWcY7CjhuFwYog42ltgrgW9HzMtJM + 
cEc9lRITKurTr0TWW+x1yDeCaKd/1ZGjFVtQMUYyV+GAfKsAOtDCUPGF9dA= + -----END CERTIFICATE----- +ClusterName: dev.datasaker.io +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta2 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "7142721951056419283723637893" + etcd-clients-ca: "7142721951028761584467841211" + etcd-manager-ca-events: "7142721951057921435241405328" + etcd-manager-ca-main: "7142721951074386633614158554" + etcd-peers-ca-events: "7142721951138880539659455124" + etcd-peers-ca-main: "7142721951056140991529806803" + kubernetes-ca: "7142721951268583043543051771" + service-account: "7142721951191621691964241737" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + featureGates: + CSIMigrationAWS: "true" + InTreePluginAWSUnregister: "true" + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + kops.k8s.io/instancegroup: master-ap-northeast-2c + kops.k8s.io/kops-controller-pki: "" + node-role.kubernetes.io/control-plane: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: false + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/addons/bootstrap-channel.yaml +containerdConfig: + configOverride: | + version = 2 + imports = ["/etc/containerd/runtime_*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.6@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db" + [plugins."io.containerd.grpc.v1.cri".containerd] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth] + username = "datasaker" + password = "dckr_pat_kQP6vcHm_jMChWd_zvgH_G3kucc" + logLevel: info + runc: + version: 1.1.4 + version: 1.6.8 +etcdManifests: +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/main-master-ap-northeast-2c.yaml +- s3://clusters.dev.datasaker.io/dev.datasaker.io/manifests/etcd/events-master-ap-northeast-2c.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml +useInstanceIDForNodeName: true diff --git a/terraform/tf-kops-dev-20221025/kubernetes.tf b/terraform/tf-kops-dev-20221025/kubernetes.tf new file mode 100644 index 0000000..51f72ff --- /dev/null +++ b/terraform/tf-kops-dev-20221025/kubernetes.tf @@ -0,0 +1,2842 @@ +locals { + cluster_name = "dev.datasaker.io" + master_autoscaling_group_ids = [aws_autoscaling_group.master-ap-northeast-2a-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2b-masters-dev-datasaker-io.id, 
aws_autoscaling_group.master-ap-northeast-2c-masters-dev-datasaker-io.id] + master_security_group_ids = [aws_security_group.masters-dev-datasaker-io.id] + masters_role_arn = aws_iam_role.masters-dev-datasaker-io.arn + masters_role_name = aws_iam_role.masters-dev-datasaker-io.name + node_autoscaling_group_ids = [aws_autoscaling_group.dev-data-druid-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-druid-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-druid-c-dev-datasaker-io.id, aws_autoscaling_group.dev-data-kafka-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-kafka-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-kafka-c-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-a-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-b-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-c-dev-datasaker-io.id, aws_autoscaling_group.dev-process-a-dev-datasaker-io.id, aws_autoscaling_group.dev-process-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-c-dev-datasaker-io.id] + node_security_group_ids = [aws_security_group.nodes-dev-datasaker-io.id] + node_subnet_ids = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0c875e254456809f7"] + nodes_role_arn = aws_iam_role.nodes-dev-datasaker-io.arn + nodes_role_name = aws_iam_role.nodes-dev-datasaker-io.name + region = "ap-northeast-2" + subnet_ap-northeast-2a_id = "subnet-0c875e254456809f7" + subnet_ap-northeast-2b_id = "subnet-05672a669943fc12f" + subnet_ap-northeast-2c_id = "subnet-0940fd78504acbbde" + subnet_ids = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0a5d787353f874684", "subnet-0c875e254456809f7", "subnet-0de55619bee2411f8", "subnet-0ee26ffc561efb292"] + subnet_utility-ap-northeast-2a_id = "subnet-0de55619bee2411f8" + subnet_utility-ap-northeast-2b_id = "subnet-0a5d787353f874684" + subnet_utility-ap-northeast-2c_id = "subnet-0ee26ffc561efb292" + vpc_id = "vpc-0b6e0b906c678a22f" +} + +output "cluster_name" { + value = "dev.datasaker.io" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-ap-northeast-2a-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2b-masters-dev-datasaker-io.id, aws_autoscaling_group.master-ap-northeast-2c-masters-dev-datasaker-io.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-dev-datasaker-io.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-dev-datasaker-io.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-dev-datasaker-io.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.dev-data-druid-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-druid-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-druid-c-dev-datasaker-io.id, aws_autoscaling_group.dev-data-kafka-a-dev-datasaker-io.id, aws_autoscaling_group.dev-data-kafka-b-dev-datasaker-io.id, aws_autoscaling_group.dev-data-kafka-c-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-a-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-b-dev-datasaker-io.id, aws_autoscaling_group.dev-mgmt-c-dev-datasaker-io.id, aws_autoscaling_group.dev-process-a-dev-datasaker-io.id, aws_autoscaling_group.dev-process-b-dev-datasaker-io.id, aws_autoscaling_group.dev-process-c-dev-datasaker-io.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-dev-datasaker-io.id] +} + +output "node_subnet_ids" { + value = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0c875e254456809f7"] +} + +output "nodes_role_arn" { + value = 
aws_iam_role.nodes-dev-datasaker-io.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-dev-datasaker-io.name +} + +output "region" { + value = "ap-northeast-2" +} + +output "subnet_ap-northeast-2a_id" { + value = "subnet-0c875e254456809f7" +} + +output "subnet_ap-northeast-2b_id" { + value = "subnet-05672a669943fc12f" +} + +output "subnet_ap-northeast-2c_id" { + value = "subnet-0940fd78504acbbde" +} + +output "subnet_ids" { + value = ["subnet-05672a669943fc12f", "subnet-0940fd78504acbbde", "subnet-0a5d787353f874684", "subnet-0c875e254456809f7", "subnet-0de55619bee2411f8", "subnet-0ee26ffc561efb292"] +} + +output "subnet_utility-ap-northeast-2a_id" { + value = "subnet-0de55619bee2411f8" +} + +output "subnet_utility-ap-northeast-2b_id" { + value = "subnet-0a5d787353f874684" +} + +output "subnet_utility-ap-northeast-2c_id" { + value = "subnet-0ee26ffc561efb292" +} + +output "vpc_id" { + value = "vpc-0b6e0b906c678a22f" +} + +provider "aws" { + region = "ap-northeast-2" +} + +provider "aws" { + alias = "files" + region = "ap-northeast-2" +} + +resource "aws_autoscaling_group" "dev-data-druid-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-druid-a-dev-datasaker-io.id + version = aws_launch_template.dev-data-druid-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-druid-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-druid-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data-druid" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-druid-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-druid-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-data-druid-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-druid-b-dev-datasaker-io.id + version = aws_launch_template.dev-data-druid-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-druid-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-druid-b.dev.datasaker.io" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data-druid" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-druid-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-druid-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "dev-data-druid-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-druid-c-dev-datasaker-io.id + version = aws_launch_template.dev-data-druid-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-druid-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-druid-c.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data-druid" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-druid-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-druid-c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0940fd78504acbbde"] +} + +resource "aws_autoscaling_group" "dev-data-kafka-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-kafka-a-dev-datasaker-io.id + version = aws_launch_template.dev-data-kafka-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-kafka-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-kafka-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data-kafka" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-kafka-a" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-kafka-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-data-kafka-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-kafka-b-dev-datasaker-io.id + version = aws_launch_template.dev-data-kafka-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-kafka-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-kafka-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data-kafka" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-kafka-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-kafka-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "dev-data-kafka-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-data-kafka-c-dev-datasaker-io.id + version = aws_launch_template.dev-data-kafka-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-data-kafka-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-data-kafka-c.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "data-kafka" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-kafka-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-data-kafka-c" + } + tag { + key = 
"kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0940fd78504acbbde"] +} + +resource "aws_autoscaling_group" "dev-mgmt-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-mgmt-a-dev-datasaker-io.id + version = aws_launch_template.dev-mgmt-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-mgmt-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-mgmt-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "mgmt" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-mgmt-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-mgmt-b-dev-datasaker-io.id + version = aws_launch_template.dev-mgmt-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-mgmt-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-mgmt-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "mgmt" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "dev-mgmt-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = 
aws_launch_template.dev-mgmt-c-dev-datasaker-io.id + version = aws_launch_template.dev-mgmt-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-mgmt-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-mgmt-c.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "mgmt" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-mgmt-c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0940fd78504acbbde"] +} + +resource "aws_autoscaling_group" "dev-process-a-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-a-dev-datasaker-io.id + version = aws_launch_template.dev-process-a-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-a.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-a.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-a" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "dev-process-b-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-b-dev-datasaker-io.id + version = aws_launch_template.dev-process-b-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-b.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + 
propagate_at_launch = true + value = "dev-process-b.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "dev-process-c-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.dev-process-c-dev-datasaker-io.id + version = aws_launch_template.dev-process-c-dev-datasaker-io.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "dev-process-c.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "dev-process-c.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" + propagate_at_launch = true + value = "process" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "dev-process-c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0940fd78504acbbde"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2a-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2a-masters-dev-datasaker-io.id + version = aws_launch_template.master-ap-northeast-2a-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2a.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2a.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2a" + } + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2a" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0c875e254456809f7"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2b-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2b-masters-dev-datasaker-io.id + version = aws_launch_template.master-ap-northeast-2b-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2b.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-ap-northeast-2b.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2b" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2b" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-05672a669943fc12f"] +} + +resource "aws_autoscaling_group" "master-ap-northeast-2c-masters-dev-datasaker-io" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-ap-northeast-2c-masters-dev-datasaker-io.id + version = aws_launch_template.master-ap-northeast-2c-masters-dev-datasaker-io.latest_version + } + load_balancers = [aws_elb.api-dev-datasaker-io.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-ap-northeast-2c.masters.dev.datasaker.io" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "dev.datasaker.io" + } + tag { + key 
= "Name" + propagate_at_launch = true + value = "master-ap-northeast-2c.masters.dev.datasaker.io" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2c" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-ap-northeast-2c" + } + tag { + key = "kubernetes.io/cluster/dev.datasaker.io" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = ["subnet-0940fd78504acbbde"] +} + +resource "aws_ebs_volume" "a-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2a" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "a.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "a-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2a" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "a.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "a/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "b-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2b" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "b.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "b-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2b" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "b.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "b/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "c-etcd-events-dev-datasaker-io" { + availability_zone = "ap-northeast-2c" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "c.etcd-events.dev.datasaker.io" + "k8s.io/etcd/events" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "c-etcd-main-dev-datasaker-io" { + availability_zone = "ap-northeast-2c" + encrypted = true + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "c.etcd-main.dev.datasaker.io" + "k8s.io/etcd/main" = "c/a,b,c" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_elb" "api-dev-datasaker-io" { + 
connection_draining = true + connection_draining_timeout = 300 + cross_zone_load_balancing = false + health_check { + healthy_threshold = 2 + interval = 10 + target = "SSL:443" + timeout = 5 + unhealthy_threshold = 2 + } + idle_timeout = 300 + listener { + instance_port = 443 + instance_protocol = "TCP" + lb_port = 443 + lb_protocol = "TCP" + } + name = "api-dev-datasaker-io-ru2qna" + security_groups = [aws_security_group.api-elb-dev-datasaker-io.id] + subnets = ["subnet-0a5d787353f874684", "subnet-0de55619bee2411f8", "subnet-0ee26ffc561efb292"] + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "api.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_instance_profile" "masters-dev-datasaker-io" { + name = "masters.dev.datasaker.io" + role = aws_iam_role.masters-dev-datasaker-io.name + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-dev-datasaker-io" { + name = "nodes.dev.datasaker.io" + role = aws_iam_role.nodes-dev-datasaker-io.name + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role" "masters-dev-datasaker-io" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.dev.datasaker.io_policy") + name = "masters.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role" "nodes-dev-datasaker-io" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.dev.datasaker.io_policy") + name = "nodes.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-dev-datasaker-io" { + name = "masters.dev.datasaker.io" + policy = file("${path.module}/data/aws_iam_role_policy_masters.dev.datasaker.io_policy") + role = aws_iam_role.masters-dev-datasaker-io.name +} + +resource "aws_iam_role_policy" "nodes-dev-datasaker-io" { + name = "nodes.dev.datasaker.io" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.dev.datasaker.io_policy") + role = aws_iam_role.nodes-dev-datasaker-io.name +} + +resource "aws_key_pair" "kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6" { + key_name = "kubernetes.dev.datasaker.io-c8:01:5e:c8:c1:4f:2a:1b:71:6c:21:3a:5c:04:7b:d6" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.dev.datasaker.io-c8015ec8c14f2a1b716c213a5c047bd6_public_key") + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } +} + +resource "aws_launch_template" "dev-data-druid-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = 
"disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-druid-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-druid-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-druid-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-druid-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-b" + 
"kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-druid-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-druid-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-druid-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-druid-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-druid" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-druid-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-druid-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-druid-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-kafka-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-kafka-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-kafka-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-kafka-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + 
key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-kafka-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-kafka-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-data-kafka-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-data-kafka-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + 
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-data-kafka-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "data-kafka" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-data-kafka-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-data-kafka-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-data-kafka-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-mgmt-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-mgmt-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = 
"dev.datasaker.io" + "Name" = "dev-mgmt-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-mgmt-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-mgmt-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-mgmt-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-mgmt-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-mgmt-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + 
instance_type = "m6i.xlarge" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-mgmt-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-mgmt-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "mgmt" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-mgmt-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-mgmt-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-mgmt-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-process-a-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-process-a.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a" + 
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-a.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-a" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-a.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-process-b-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-process-b.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-b.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-b.dev.datasaker.io" + 
"k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-b" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-b.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "dev-process-c-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "m6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "dev-process-c.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "dev-process-c.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/datasaker/group" = "process" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "dev-process-c" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "dev-process-c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_dev-process-c.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2a-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = 
"ami-0abb33b73a78cae31" + instance_type = "c6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2a.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2a.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2a" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2a.masters.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2b-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "c6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + 
monitoring { + enabled = false + } + name = "master-ap-northeast-2b.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2b.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2b" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2b.masters.dev.datasaker.io_user_data") +} + +resource "aws_launch_template" "master-ap-northeast-2c-masters-dev-datasaker-io" { + block_device_mappings { + device_name = "/dev/sda1" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 50 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-dev-datasaker-io.id + } + image_id = "ami-0abb33b73a78cae31" + instance_type = "c6i.large" + key_name = aws_key_pair.kubernetes-dev-datasaker-io-c8015ec8c14f2a1b716c213a5c047bd6.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 3 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-ap-northeast-2c.masters.dev.datasaker.io" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-dev-datasaker-io.id] + } + tag_specifications { + resource_type = "instance" + tags = { + 
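# The k8s.io/cluster-autoscaler/node-template/label/* tags mirror the node labels so the cluster autoscaler can infer them when scaling a group up from zero. +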
"KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + } + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "master-ap-northeast-2c.masters.dev.datasaker.io" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-ap-northeast-2c" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-ap-northeast-2c.masters.dev.datasaker.io_user_data") +} + +resource "aws_route53_record" "api-dev-datasaker-io" { + alias { + evaluate_target_health = false + name = aws_elb.api-dev-datasaker-io.dns_name + zone_id = aws_elb.api-dev-datasaker-io.zone_id + } + name = "api.dev.datasaker.io" + type = "A" + zone_id = "/hostedzone/Z072735718G25WNVKU834" +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "dev.datasaker.io/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-aws-cloud-controller-addons-k8s-io-k8s-1-18" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content") + key = "dev.datasaker.io/addons/aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-aws-ebs-csi-driver-addons-k8s-io-k8s-1-17" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content") + key = "dev.datasaker.io/addons/aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource 
"aws_s3_object" "dev-datasaker-io-addons-bootstrap" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-bootstrap_content") + key = "dev.datasaker.io/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "dev.datasaker.io/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "dev.datasaker.io/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "dev.datasaker.io/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "dev.datasaker.io/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-leader-migration-rbac-addons-k8s-io-k8s-1-23" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-leader-migration.rbac.addons.k8s.io-k8s-1.23_content") + key = "dev.datasaker.io/addons/leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-limit-range-addons-k8s-io" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-limit-range.addons.k8s.io_content") + key = "dev.datasaker.io/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-networking-projectcalico-org-k8s-1-25" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-networking.projectcalico.org-k8s-1.25_content") + key = "dev.datasaker.io/addons/networking.projectcalico.org/k8s-1.25.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "dev-datasaker-io-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_dev.datasaker.io-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "dev.datasaker.io/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "clusters.dev.datasaker.io" + content = 
file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "dev.datasaker.io/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "dev.datasaker.io/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "dev.datasaker.io/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-ap-northeast-2a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2a_content") + key = "dev.datasaker.io/manifests/etcd/events-master-ap-northeast-2a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-ap-northeast-2b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2b_content") + key = "dev.datasaker.io/manifests/etcd/events-master-ap-northeast-2b.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-ap-northeast-2c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-ap-northeast-2c_content") + key = "dev.datasaker.io/manifests/etcd/events-master-ap-northeast-2c.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-ap-northeast-2a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2a_content") + key = "dev.datasaker.io/manifests/etcd/main-master-ap-northeast-2a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-ap-northeast-2b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2b_content") + key = "dev.datasaker.io/manifests/etcd/main-master-ap-northeast-2b.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-ap-northeast-2c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-ap-northeast-2c_content") + key = "dev.datasaker.io/manifests/etcd/main-master-ap-northeast-2c.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "dev.datasaker.io/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-druid-a" { + bucket = "clusters.dev.datasaker.io" + content = 
file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-druid-a_content") + key = "dev.datasaker.io/igconfig/node/dev-data-druid-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-druid-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-druid-b_content") + key = "dev.datasaker.io/igconfig/node/dev-data-druid-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-druid-c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-druid-c_content") + key = "dev.datasaker.io/igconfig/node/dev-data-druid-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-kafka-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-kafka-a_content") + key = "dev.datasaker.io/igconfig/node/dev-data-kafka-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-kafka-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-kafka-b_content") + key = "dev.datasaker.io/igconfig/node/dev-data-kafka-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-data-kafka-c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-data-kafka-c_content") + key = "dev.datasaker.io/igconfig/node/dev-data-kafka-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-mgmt-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-mgmt-a_content") + key = "dev.datasaker.io/igconfig/node/dev-mgmt-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-mgmt-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-mgmt-b_content") + key = "dev.datasaker.io/igconfig/node/dev-mgmt-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-mgmt-c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-mgmt-c_content") + key = "dev.datasaker.io/igconfig/node/dev-mgmt-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-a_content") + key = "dev.datasaker.io/igconfig/node/dev-process-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-b_content") + key = "dev.datasaker.io/igconfig/node/dev-process-b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-dev-process-c" { + bucket = "clusters.dev.datasaker.io" 
+ content = file("${path.module}/data/aws_s3_object_nodeupconfig-dev-process-c_content") + key = "dev.datasaker.io/igconfig/node/dev-process-c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-ap-northeast-2a" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2a_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-ap-northeast-2b" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2b_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2b/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-ap-northeast-2c" { + bucket = "clusters.dev.datasaker.io" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-ap-northeast-2c_content") + key = "dev.datasaker.io/igconfig/master/master-ap-northeast-2c/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "api-elb-dev-datasaker-io" { + description = "Security group for api ELB" + name = "api-elb.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "api-elb.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-0b6e0b906c678a22f" +} + +resource "aws_security_group" "masters-dev-datasaker-io" { + description = "Security group for masters" + name = "masters.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "masters.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-0b6e0b906c678a22f" +} + +resource "aws_security_group" "nodes-dev-datasaker-io" { + description = "Security group for nodes" + name = "nodes.dev.datasaker.io" + tags = { + "KubernetesCluster" = "dev.datasaker.io" + "Name" = "nodes.dev.datasaker.io" + "kubernetes.io/cluster/dev.datasaker.io" = "owned" + } + vpc_id = "vpc-0b6e0b906c678a22f" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-api-elb-dev-datasaker-io" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-masters-dev-datasaker-io" { + from_port = 22 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-nodes-dev-datasaker-io" { + from_port = 22 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = 
aws_security_group.nodes-dev-datasaker-io.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-443to443-api-elb-dev-datasaker-io" { + from_port = 443 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-api-elb-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-api-elb-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-ingress-all-0to0-masters-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-dev-datasaker-io-ingress-all-0to0-nodes-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + source_security_group_id = aws_security_group.masters-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-4-0to0-masters-dev-datasaker-io" { + from_port = 0 + protocol = "4" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-all-0to0-nodes-dev-datasaker-io" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-1to2379-masters-dev-datasaker-io" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 2379 + type = 
"ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-2382to4000-masters-dev-datasaker-io" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-tcp-4003to65535-masters-dev-datasaker-io" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-dev-datasaker-io-ingress-udp-1to65535-masters-dev-datasaker-io" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.nodes-dev-datasaker-io.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "https-elb-to-master" { + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-dev-datasaker-io.id + source_security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmpv6-pmtu-api-elb-__--0" { + from_port = -1 + ipv6_cidr_blocks = ["::/0"] + protocol = "icmpv6" + security_group_id = aws_security_group.api-elb-dev-datasaker-io.id + to_port = -1 + type = "ingress" +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +}