Add kubespray 2.24

변정훈
2024-02-16 17:08:09 +09:00
parent 1fa9b0df4b
commit f69d904725
1423 changed files with 89069 additions and 2 deletions


@@ -0,0 +1,141 @@
#Add AWS Roles for Kubernetes
resource "aws_iam_role" "kube_control_plane" {
  name               = "kubernetes-${var.aws_cluster_name}-master"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      }
    }
  ]
}
EOF
}

resource "aws_iam_role" "kube-worker" {
  name               = "kubernetes-${var.aws_cluster_name}-node"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      }
    }
  ]
}
EOF
}

#Add AWS Policies for Kubernetes
resource "aws_iam_role_policy" "kube_control_plane" {
  name   = "kubernetes-${var.aws_cluster_name}-master"
  role   = aws_iam_role.kube_control_plane.id
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kube-worker" {
  name   = "kubernetes-${var.aws_cluster_name}-node"
  role   = aws_iam_role.kube-worker.id
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetAuthorizationToken",
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
        "ecr:GetRepositoryPolicy",
        "ecr:DescribeRepositories",
        "ecr:ListImages",
        "ecr:BatchGetImage"
      ],
      "Resource": "*"
    }
  ]
}
EOF
}

#Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube_control_plane" {
  name = "kube_${var.aws_cluster_name}_master_profile"
  role = aws_iam_role.kube_control_plane.name
}

resource "aws_iam_instance_profile" "kube-worker" {
  name = "kube_${var.aws_cluster_name}_node_profile"
  role = aws_iam_role.kube-worker.name
}
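Illustrative only, not part of this commit: a minimal sketch of how the control-plane instance profile created above might be attached to an EC2 instance from a root configuration. The module name, module path, and the variables aws_ami_id and example_subnet_id are assumptions; the output name kube_control_plane-profile comes from this module's outputs file.

# Sketch: consuming the IAM module from a root configuration (assumed layout).
module "aws-iam" {
  source           = "./modules/iam"
  aws_cluster_name = var.aws_cluster_name
}

resource "aws_instance" "k8s-master-example" {
  ami                  = var.aws_ami_id        # hypothetical variable
  instance_type        = "t3.medium"
  subnet_id            = var.example_subnet_id # hypothetical variable
  iam_instance_profile = module.aws-iam.kube_control_plane-profile
}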


@@ -0,0 +1,7 @@
output "kube_control_plane-profile" {
value = aws_iam_instance_profile.kube_control_plane.name
}
output "kube-worker-profile" {
value = aws_iam_instance_profile.kube-worker.name
}


@@ -0,0 +1,3 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}


@@ -0,0 +1,41 @@
# Create a new AWS NLB for the K8S API
resource "aws_lb" "aws-nlb-api" {
  name                             = "kubernetes-nlb-${var.aws_cluster_name}"
  load_balancer_type               = "network"
  subnets                          = length(var.aws_subnet_ids_public) <= length(var.aws_avail_zones) ? var.aws_subnet_ids_public : slice(var.aws_subnet_ids_public, 0, length(var.aws_avail_zones))
  idle_timeout                     = 400
  enable_cross_zone_load_balancing = true

  tags = merge(var.default_tags, tomap({
    Name = "kubernetes-${var.aws_cluster_name}-nlb-api"
  }))
}

# Create a new AWS NLB target group (IP targets) for the K8S API
resource "aws_lb_target_group" "aws-nlb-api-tg" {
  name        = "kubernetes-nlb-tg-${var.aws_cluster_name}"
  port        = var.k8s_secure_api_port
  protocol    = "TCP"
  target_type = "ip"
  vpc_id      = var.aws_vpc_id

  health_check {
    healthy_threshold   = 2
    unhealthy_threshold = 2
    interval            = 30
    protocol            = "HTTPS"
    path                = "/healthz"
  }
}

# Create a new AWS NLB listener that forwards to the target group
resource "aws_lb_listener" "aws-nlb-api-listener" {
  load_balancer_arn = aws_lb.aws-nlb-api.arn
  port              = var.aws_nlb_api_port
  protocol          = "TCP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.aws-nlb-api-tg.arn
  }
}
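Illustrative only, not part of this commit: because the target group above uses target_type = "ip", control-plane nodes would be registered by private IP address, roughly as sketched below. The count variable and the aws_instance resource are assumptions; the target group ARN is exposed by this module's aws_nlb_api_tg_arn output.

# Sketch: registering control-plane IPs with the NLB target group (assumed names).
resource "aws_lb_target_group_attachment" "tg-attach-master-nodes" {
  count            = var.aws_kube_master_num # hypothetical variable
  target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn
  target_id        = element(aws_instance.k8s-master.*.private_ip, count.index) # hypothetical resource
}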


@@ -0,0 +1,11 @@
output "aws_nlb_api_id" {
value = aws_lb.aws-nlb-api.id
}
output "aws_nlb_api_fqdn" {
value = aws_lb.aws-nlb-api.dns_name
}
output "aws_nlb_api_tg_arn" {
value = aws_lb_target_group.aws-nlb-api-tg.arn
}


@@ -0,0 +1,30 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_vpc_id" {
description = "AWS VPC ID"
}
variable "aws_nlb_api_port" {
description = "Port for AWS NLB"
}
variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "aws_avail_zones" {
description = "Availability Zones Used"
type = list(string)
}
variable "aws_subnet_ids_public" {
description = "IDs of Public Subnets"
type = list(string)
}
variable "default_tags" {
description = "Tags for all resources"
type = map(string)
}
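Illustrative only, not part of this commit: a sketch of how this NLB module might be instantiated from a root configuration, with VPC values taken from the VPC module that appears later in this commit. The module names and paths, the aws_availability_zones data source, and the port values are assumptions; the output names aws_vpc_id and aws_subnet_ids_public come from the VPC module's outputs.

# Sketch: wiring the NLB module from a root configuration (assumed names and values).
module "aws-nlb" {
  source = "./modules/nlb"

  aws_cluster_name      = var.aws_cluster_name
  aws_vpc_id            = module.aws-vpc.aws_vpc_id
  aws_avail_zones       = data.aws_availability_zones.available.names # assumed data source in the root module
  aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
  aws_nlb_api_port      = 6443 # assumed
  k8s_secure_api_port   = 6443 # assumed
  default_tags          = var.default_tags
}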


@@ -0,0 +1,137 @@
resource "aws_vpc" "cluster-vpc" {
cidr_block = var.aws_vpc_cidr_block
#DNS Related Entries
enable_dns_support = true
enable_dns_hostnames = true
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-vpc"
}))
}
resource "aws_eip" "cluster-nat-eip" {
count = length(var.aws_cidr_subnets_public)
vpc = true
}
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = aws_vpc.cluster-vpc.id
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-internetgw"
}))
}
resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_cidr_subnets_public)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
cidr_block = element(var.aws_cidr_subnets_public, count.index)
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}))
}
resource "aws_nat_gateway" "cluster-nat-gateway" {
count = length(var.aws_cidr_subnets_public)
allocation_id = element(aws_eip.cluster-nat-eip.*.id, count.index)
subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
}
resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_cidr_subnets_private)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
cidr_block = element(var.aws_cidr_subnets_private, count.index)
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}))
}
#Routing in VPC
#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
resource "aws_route_table" "kubernetes-public" {
vpc_id = aws_vpc.cluster-vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
}
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
}))
}
resource "aws_route_table" "kubernetes-private" {
count = length(var.aws_cidr_subnets_private)
vpc_id = aws_vpc.cluster-vpc.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
}
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
}))
}
resource "aws_route_table_association" "kubernetes-public" {
count = length(var.aws_cidr_subnets_public)
subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
route_table_id = aws_route_table.kubernetes-public.id
}
resource "aws_route_table_association" "kubernetes-private" {
count = length(var.aws_cidr_subnets_private)
subnet_id = element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)
route_table_id = element(aws_route_table.kubernetes-private.*.id, count.index)
}
#Kubernetes Security Groups
resource "aws_security_group" "kubernetes" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
vpc_id = aws_vpc.cluster-vpc.id
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
}))
}
resource "aws_security_group_rule" "allow-all-ingress" {
type = "ingress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = [var.aws_vpc_cidr_block]
security_group_id = aws_security_group.kubernetes.id
}
resource "aws_security_group_rule" "allow-all-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.kubernetes.id
}
resource "aws_security_group_rule" "allow-ssh-connections" {
type = "ingress"
from_port = 22
to_port = 22
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.kubernetes.id
}
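Illustrative only, not part of this commit: the aws_avail_zones list consumed throughout this file (for subnet placement) can be derived from the provider rather than hard-coded. A minimal sketch, assuming the data source lives in the calling root module:

# Sketch: discovering availability zones to pass in as aws_avail_zones.
data "aws_availability_zones" "available" {
  state = "available"
}

locals {
  # Pass local.az_names (or a slice of it) to the module's aws_avail_zones variable.
  az_names = data.aws_availability_zones.available.names
}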


@@ -0,0 +1,19 @@
output "aws_vpc_id" {
value = aws_vpc.cluster-vpc.id
}
output "aws_subnet_ids_private" {
value = aws_subnet.cluster-vpc-subnets-private.*.id
}
output "aws_subnet_ids_public" {
value = aws_subnet.cluster-vpc-subnets-public.*.id
}
output "aws_security_group" {
value = aws_security_group.kubernetes.*.id
}
output "default_tags" {
value = var.default_tags
}


@@ -0,0 +1,27 @@
variable "aws_vpc_cidr_block" {
description = "CIDR Blocks for AWS VPC"
}
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_avail_zones" {
description = "AWS Availability Zones Used"
type = list(string)
}
variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability zones"
type = list(string)
}
variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability zones"
type = list(string)
}
variable "default_tags" {
description = "Default tags for all resources"
type = map(string)
}
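Illustrative only, not part of this commit: a sketch of how the variables above might be satisfied when the VPC module is instantiated from a root configuration. The module name and path, the CIDR values, the zone names, and the tag map are assumptions chosen only to show the expected types.

# Sketch: instantiating the VPC module from a root configuration (assumed values).
module "aws-vpc" {
  source = "./modules/vpc"

  aws_cluster_name         = var.aws_cluster_name
  aws_vpc_cidr_block       = "10.250.192.0/18"
  aws_avail_zones          = ["eu-central-1a", "eu-central-1b"]
  aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
  aws_cidr_subnets_public  = ["10.250.224.0/20", "10.250.240.0/20"]
  default_tags             = { Env = "devtest" }
}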