Add kubespray 2.24

This commit is contained in:
변정훈
2024-02-16 17:08:09 +09:00
parent 1fa9b0df4b
commit f69d904725
1423 changed files with 89069 additions and 2 deletions

5
contrib/terraform/OWNERS Normal file
View File

@@ -0,0 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- holmsten
- miouge1

3
contrib/terraform/aws/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
*.tfstate*
.terraform.lock.hcl
.terraform

View File

@@ -0,0 +1,124 @@
# Kubernetes on AWS with Terraform
## Overview
This project will create:
- VPC with Public and Private Subnets across a configurable number of Availability Zones
- Bastion Hosts and NAT Gateways in the Public Subnet
- A dynamic number of master, etcd, and worker nodes in the Private Subnets
- evenly distributed over the Availability Zones
- An AWS NLB in the Public Subnets for accessing the Kubernetes API from the internet
## Requirements
- Terraform 0.12.0 or newer
## How to Use
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
```commandline
export TF_VAR_AWS_ACCESS_KEY_ID="www"
export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
export TF_VAR_AWS_DEFAULT_REGION="zzz"
```
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Debian 10 (Buster) as the base image (see the AMI variables in `variables.tf`). If you want to change this behaviour, see the note "Using a different Linux distribution" below.
- Create an AWS EC2 SSH Key
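For example, an existing public key can be imported as an EC2 key pair with the AWS CLI (AWS CLI v2 syntax; `kubespray-key` is an illustrative name and must match `TF_VAR_AWS_SSH_KEY_NAME`):
```commandline
aws ec2 import-key-pair --key-name kubespray-key --public-key-material fileb://~/.ssh/id_rsa.pub
```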
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials
Example:
```commandline
terraform apply -var-file=credentials.tfvars
```
- Terraform automatically creates an Ansible inventory file called `hosts` for the created infrastructure in the `inventory` directory
- Ansible automatically generates an SSH config file for your bastion hosts. To connect to hosts through the bastion with SSH, use the generated `ssh-bastion.conf`; Ansible detects the bastion and adjusts `ssh_args` accordingly
```commandline
ssh -F ./ssh-bastion.conf user@$ip
```
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
Example (this one assumes you are using Ubuntu; for the default Debian image, use `ansible_user=admin`):
```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
```
## Using a different Linux distribution
To use a Linux distribution other than the default, adjust the AMI search filters of the `data "aws_ami" "distro"` block by setting variables in your `terraform.tfvars` file. This keeps the configuration flexible without modifying the core Terraform files.
### Example Usages
- **Debian Jessie**: To use Debian Jessie, add the following lines to your `terraform.tfvars`:
```hcl
ami_name_pattern = "debian-jessie-amd64-hvm-*"
ami_owners = ["379101102735"]
```
- **Ubuntu 16.04**: To use Ubuntu 16.04, add the following lines to your `terraform.tfvars`:
```hcl
ami_name_pattern = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"
ami_owners = ["099720109477"]
```
- **CentOS 7**: To use CentOS 7, add the following lines to your `terraform.tfvars`:
```hcl
ami_name_pattern = "dcos-centos7-*"
ami_owners = ["688023202711"]
```
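The same AMI variables can also be passed on the command line instead of editing `terraform.tfvars`, for example reusing the Ubuntu 16.04 values above:
```commandline
terraform apply -var-file=credentials.tfvars \
  -var 'ami_name_pattern=ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*' \
  -var 'ami_owners=["099720109477"]'
```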
## Connecting to Kubernetes
You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder.
```commandline
# Get the controller's IP address.
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
# Get the hostname of the load balancer.
LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2)
# Get the controller's SSH fingerprint.
ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1
ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null
# Get the kubeconfig from the controller.
mkdir -p ~/.kube
ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf"
scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config
sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config
kubectl get nodes
```
## Troubleshooting
### Remaining AWS IAM Instance Profile
If the cluster was destroyed without using Terraform, the AWS IAM Instance Profiles may still remain.
To delete them, use the AWS CLI:
```commandline
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
```
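If you are unsure which profiles are left over, you can list them first; the profiles created by this project are named `kube_<cluster_name>_master_profile` and `kube_<cluster_name>_node_profile`:
```commandline
aws iam list-instance-profiles --query 'InstanceProfiles[].InstanceProfileName' --output text | tr '\t' '\n' | grep '^kube_'
```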
### Ansible Inventory doesn't get created
Terraform occasionally fails to create the Ansible inventory file automatically. If this happens, copy the output after `inventory=`, create a file named `hosts` in the `inventory` directory, and paste the inventory into it.
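Alternatively, assuming Terraform 0.14 or newer (which adds `-raw`), you can write the rendered inventory output directly to the file from `contrib/terraform/aws`:
```commandline
terraform output -raw inventory > ../../../inventory/hosts
```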
## Architecture
Pictured is an AWS infrastructure created with this Terraform project, distributed over two Availability Zones.
![AWS Infrastructure with Terraform ](docs/aws_kubespray.png)

View File

@@ -0,0 +1,179 @@
terraform {
required_version = ">= 0.12.0"
}
provider "aws" {
access_key = var.AWS_ACCESS_KEY_ID
secret_key = var.AWS_SECRET_ACCESS_KEY
region = var.AWS_DEFAULT_REGION
}
data "aws_availability_zones" "available" {}
/*
* Calling modules who create the initial AWS VPC / AWS ELB
* and AWS IAM Roles for Kubernetes Deployment
*/
module "aws-vpc" {
source = "./modules/vpc"
aws_cluster_name = var.aws_cluster_name
aws_vpc_cidr_block = var.aws_vpc_cidr_block
aws_avail_zones = data.aws_availability_zones.available.names
aws_cidr_subnets_private = var.aws_cidr_subnets_private
aws_cidr_subnets_public = var.aws_cidr_subnets_public
default_tags = var.default_tags
}
module "aws-nlb" {
source = "./modules/nlb"
aws_cluster_name = var.aws_cluster_name
aws_vpc_id = module.aws-vpc.aws_vpc_id
aws_avail_zones = data.aws_availability_zones.available.names
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
aws_nlb_api_port = var.aws_nlb_api_port
k8s_secure_api_port = var.k8s_secure_api_port
default_tags = var.default_tags
}
module "aws-iam" {
source = "./modules/iam"
aws_cluster_name = var.aws_cluster_name
}
/*
* Create Bastion Instances in AWS
*
*/
resource "aws_instance" "bastion-server" {
ami = data.aws_ami.distro.id
instance_type = var.aws_bastion_size
count = var.aws_bastion_num
associate_public_ip_address = true
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group
key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
Cluster = var.aws_cluster_name
Role = "bastion-${var.aws_cluster_name}-${count.index}"
}))
}
/*
* Create K8s Master and worker nodes and etcd instances
*
*/
resource "aws_instance" "k8s-master" {
ami = data.aws_ami.distro.id
instance_type = var.aws_kube_master_size
count = var.aws_kube_master_num
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group
root_block_device {
volume_size = var.aws_kube_master_disk_size
}
iam_instance_profile = module.aws-iam.kube_control_plane-profile
key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
Role = "master"
}))
}
resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" {
count = var.aws_kube_master_num
target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn
target_id = element(aws_instance.k8s-master.*.private_ip, count.index)
}
resource "aws_instance" "k8s-etcd" {
ami = data.aws_ami.distro.id
instance_type = var.aws_etcd_size
count = var.aws_etcd_num
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group
root_block_device {
volume_size = var.aws_etcd_disk_size
}
key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
Role = "etcd"
}))
}
resource "aws_instance" "k8s-worker" {
ami = data.aws_ami.distro.id
instance_type = var.aws_kube_worker_size
count = var.aws_kube_worker_num
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
vpc_security_group_ids = module.aws-vpc.aws_security_group
root_block_device {
volume_size = var.aws_kube_worker_disk_size
}
iam_instance_profile = module.aws-iam.kube-worker-profile
key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
Role = "worker"
}))
}
/*
* Create Kubespray Inventory File
*
*/
data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl")
vars = {
public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
connection_strings_node = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
list_master = join("\n", aws_instance.k8s-master.*.private_dns)
list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)))
nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\""
}
}
resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}
triggers = {
template = data.template_file.inventory.rendered
}
}

View File

@@ -0,0 +1,8 @@
#AWS Access Key
AWS_ACCESS_KEY_ID = ""
#AWS Secret Key
AWS_SECRET_ACCESS_KEY = ""
#EC2 SSH Key Name
AWS_SSH_KEY_NAME = ""
#AWS Region
AWS_DEFAULT_REGION = "eu-central-1"

Binary file added (not shown): contrib/terraform/aws/docs/aws_kubespray.png, 114 KiB

View File

@@ -0,0 +1,141 @@
#Add AWS Roles for Kubernetes
resource "aws_iam_role" "kube_control_plane" {
name = "kubernetes-${var.aws_cluster_name}-master"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
#Add AWS Policies for Kubernetes
resource "aws_iam_role_policy" "kube_control_plane" {
name = "kubernetes-${var.aws_cluster_name}-master"
role = aws_iam_role.kube_control_plane.id
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
}
]
}
EOF
}
resource "aws_iam_role_policy" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
role = aws_iam_role.kube-worker.id
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}
EOF
}
#Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube_control_plane" {
name = "kube_${var.aws_cluster_name}_master_profile"
role = aws_iam_role.kube_control_plane.name
}
resource "aws_iam_instance_profile" "kube-worker" {
name = "kube_${var.aws_cluster_name}_node_profile"
role = aws_iam_role.kube-worker.name
}

View File

@@ -0,0 +1,7 @@
output "kube_control_plane-profile" {
value = aws_iam_instance_profile.kube_control_plane.name
}
output "kube-worker-profile" {
value = aws_iam_instance_profile.kube-worker.name
}

View File

@@ -0,0 +1,3 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}

View File

@@ -0,0 +1,41 @@
# Create a new AWS NLB for K8S API
resource "aws_lb" "aws-nlb-api" {
name = "kubernetes-nlb-${var.aws_cluster_name}"
load_balancer_type = "network"
subnets = length(var.aws_subnet_ids_public) <= length(var.aws_avail_zones) ? var.aws_subnet_ids_public : slice(var.aws_subnet_ids_public, 0, length(var.aws_avail_zones))
idle_timeout = 400
enable_cross_zone_load_balancing = true
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-nlb-api"
}))
}
# Create a new AWS NLB Instance Target Group
resource "aws_lb_target_group" "aws-nlb-api-tg" {
name = "kubernetes-nlb-tg-${var.aws_cluster_name}"
port = var.k8s_secure_api_port
protocol = "TCP"
target_type = "ip"
vpc_id = var.aws_vpc_id
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
interval = 30
protocol = "HTTPS"
path = "/healthz"
}
}
# Create a new AWS NLB Listener listen to target group
resource "aws_lb_listener" "aws-nlb-api-listener" {
load_balancer_arn = aws_lb.aws-nlb-api.arn
port = var.aws_nlb_api_port
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.aws-nlb-api-tg.arn
}
}

View File

@@ -0,0 +1,11 @@
output "aws_nlb_api_id" {
value = aws_lb.aws-nlb-api.id
}
output "aws_nlb_api_fqdn" {
value = aws_lb.aws-nlb-api.dns_name
}
output "aws_nlb_api_tg_arn" {
value = aws_lb_target_group.aws-nlb-api-tg.arn
}

View File

@@ -0,0 +1,30 @@
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_vpc_id" {
description = "AWS VPC ID"
}
variable "aws_nlb_api_port" {
description = "Port for AWS NLB"
}
variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "aws_avail_zones" {
description = "Availability Zones Used"
type = list(string)
}
variable "aws_subnet_ids_public" {
description = "IDs of Public Subnets"
type = list(string)
}
variable "default_tags" {
description = "Tags for all resources"
type = map(string)
}

View File

@@ -0,0 +1,137 @@
resource "aws_vpc" "cluster-vpc" {
cidr_block = var.aws_vpc_cidr_block
#DNS Related Entries
enable_dns_support = true
enable_dns_hostnames = true
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-vpc"
}))
}
resource "aws_eip" "cluster-nat-eip" {
count = length(var.aws_cidr_subnets_public)
vpc = true
}
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = aws_vpc.cluster-vpc.id
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-internetgw"
}))
}
resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_cidr_subnets_public)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
cidr_block = element(var.aws_cidr_subnets_public, count.index)
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}))
}
resource "aws_nat_gateway" "cluster-nat-gateway" {
count = length(var.aws_cidr_subnets_public)
allocation_id = element(aws_eip.cluster-nat-eip.*.id, count.index)
subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
}
resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_cidr_subnets_private)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
cidr_block = element(var.aws_cidr_subnets_private, count.index)
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}))
}
#Routing in VPC
#TODO: Do we need two routing tables for each subnet for redundancy or is one enough?
resource "aws_route_table" "kubernetes-public" {
vpc_id = aws_vpc.cluster-vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
}
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
}))
}
resource "aws_route_table" "kubernetes-private" {
count = length(var.aws_cidr_subnets_private)
vpc_id = aws_vpc.cluster-vpc.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
}
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
}))
}
resource "aws_route_table_association" "kubernetes-public" {
count = length(var.aws_cidr_subnets_public)
subnet_id = element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)
route_table_id = aws_route_table.kubernetes-public.id
}
resource "aws_route_table_association" "kubernetes-private" {
count = length(var.aws_cidr_subnets_private)
subnet_id = element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)
route_table_id = element(aws_route_table.kubernetes-private.*.id, count.index)
}
#Kubernetes Security Groups
resource "aws_security_group" "kubernetes" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
vpc_id = aws_vpc.cluster-vpc.id
tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
}))
}
resource "aws_security_group_rule" "allow-all-ingress" {
type = "ingress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = [var.aws_vpc_cidr_block]
security_group_id = aws_security_group.kubernetes.id
}
resource "aws_security_group_rule" "allow-all-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.kubernetes.id
}
resource "aws_security_group_rule" "allow-ssh-connections" {
type = "ingress"
from_port = 22
to_port = 22
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.kubernetes.id
}

View File

@@ -0,0 +1,19 @@
output "aws_vpc_id" {
value = aws_vpc.cluster-vpc.id
}
output "aws_subnet_ids_private" {
value = aws_subnet.cluster-vpc-subnets-private.*.id
}
output "aws_subnet_ids_public" {
value = aws_subnet.cluster-vpc-subnets-public.*.id
}
output "aws_security_group" {
value = aws_security_group.kubernetes.*.id
}
output "default_tags" {
value = var.default_tags
}

View File

@@ -0,0 +1,27 @@
variable "aws_vpc_cidr_block" {
description = "CIDR Blocks for AWS VPC"
}
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_avail_zones" {
description = "AWS Availability Zones Used"
type = list(string)
}
variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability zones"
type = list(string)
}
variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability zones"
type = list(string)
}
variable "default_tags" {
description = "Default tags for all resources"
type = map(string)
}

View File

@@ -0,0 +1,27 @@
output "bastion_ip" {
value = join("\n", aws_instance.bastion-server.*.public_ip)
}
output "masters" {
value = join("\n", aws_instance.k8s-master.*.private_ip)
}
output "workers" {
value = join("\n", aws_instance.k8s-worker.*.private_ip)
}
output "etcd" {
value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
}
output "aws_nlb_api_fqdn" {
value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}"
}
output "inventory" {
value = data.template_file.inventory.rendered
}
output "default_tags" {
value = var.default_tags
}

View File

@@ -0,0 +1,59 @@
#Global Vars
aws_cluster_name = "devtest"
#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
#Bastion Host
aws_bastion_num = 1
aws_bastion_size = "t2.medium"
#Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"
aws_kube_master_disk_size = 50
aws_etcd_num = 3
aws_etcd_size = "t2.medium"
aws_etcd_disk_size = 50
aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
aws_kube_worker_disk_size = 50
#Settings AWS NLB
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443
default_tags = {
# Env = "devtest" # Product = "kubernetes"
}
inventory_file = "../../../inventory/hosts"
## Credentials
#AWS Access Key
AWS_ACCESS_KEY_ID = ""
#AWS Secret Key
AWS_SECRET_ACCESS_KEY = ""
#EC2 SSH Key Name
AWS_SSH_KEY_NAME = ""
#AWS Region
AWS_DEFAULT_REGION = "eu-central-1"

View File

@@ -0,0 +1 @@
../../../../inventory/sample/group_vars

View File

@@ -0,0 +1,27 @@
[all]
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}
${public_ip_address_bastion}
[bastion]
${public_ip_address_bastion}
[kube_control_plane]
${list_master}
[kube_node]
${list_node}
[etcd]
${list_etcd}
[calico_rr]
[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr
[k8s_cluster:vars]
${nlb_api_fqdn}

View File

@@ -0,0 +1,43 @@
#Global Vars
aws_cluster_name = "devtest"
#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]
# single AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/20"]
#aws_cidr_subnets_public = ["10.250.224.0/20"]
# 3+ AZ deployment
#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"]
#aws_cidr_subnets_public = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"]
#Bastion Host
aws_bastion_num = 1
aws_bastion_size = "t3.small"
#Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t3.medium"
aws_kube_master_disk_size = 50
aws_etcd_num = 0
aws_etcd_size = "t3.medium"
aws_etcd_disk_size = 50
aws_kube_worker_num = 4
aws_kube_worker_size = "t3.medium"
aws_kube_worker_disk_size = 50
#Settings AWS ELB
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443
default_tags = {
# Env = "devtest"
# Product = "kubernetes"
}
inventory_file = "../../../inventory/hosts"

View File

@@ -0,0 +1,33 @@
#Global Vars
aws_cluster_name = "devtest"
#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
#Bastion Host
aws_bastion_num = 1
aws_bastion_size = "t3.small"
#Kubernetes Cluster
aws_kube_master_num = 3
aws_kube_master_size = "t3.medium"
aws_kube_master_disk_size = 50
aws_etcd_num = 3
aws_etcd_size = "t3.medium"
aws_etcd_disk_size = 50
aws_kube_worker_num = 4
aws_kube_worker_size = "t3.medium"
aws_kube_worker_disk_size = 50
#Settings AWS ELB
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443
default_tags = { }
inventory_file = "../../../inventory/hosts"

View File

@@ -0,0 +1,143 @@
variable "AWS_ACCESS_KEY_ID" {
description = "AWS Access Key"
}
variable "AWS_SECRET_ACCESS_KEY" {
description = "AWS Secret Key"
}
variable "AWS_SSH_KEY_NAME" {
description = "Name of the SSH keypair to use in AWS."
}
variable "AWS_DEFAULT_REGION" {
description = "AWS Region"
}
//General Cluster Settings
variable "aws_cluster_name" {
description = "Name of AWS Cluster"
}
variable "ami_name_pattern" {
description = "The name pattern to use for AMI lookup"
type = string
default = "debian-10-amd64-*"
}
variable "ami_virtualization_type" {
description = "The virtualization type to use for AMI lookup"
type = string
default = "hvm"
}
variable "ami_owners" {
description = "The owners to use for AMI lookup"
type = list(string)
default = ["136693071363"]
}
data "aws_ami" "distro" {
most_recent = true
filter {
name = "name"
values = [var.ami_name_pattern]
}
filter {
name = "virtualization-type"
values = [var.ami_virtualization_type]
}
owners = var.ami_owners
}
//AWS VPC Variables
variable "aws_vpc_cidr_block" {
description = "CIDR Block for VPC"
}
variable "aws_cidr_subnets_private" {
description = "CIDR Blocks for private subnets in Availability Zones"
type = list(string)
}
variable "aws_cidr_subnets_public" {
description = "CIDR Blocks for public subnets in Availability Zones"
type = list(string)
}
//AWS EC2 Settings
variable "aws_bastion_size" {
description = "EC2 Instance Size of Bastion Host"
}
/*
* AWS EC2 Settings
* The number should be divisible by the number of used
* AWS Availability Zones without a remainder.
*/
variable "aws_bastion_num" {
description = "Number of Bastion Nodes"
}
variable "aws_kube_master_num" {
description = "Number of Kubernetes Master Nodes"
}
variable "aws_kube_master_disk_size" {
description = "Disk size for Kubernetes Master Nodes (in GiB)"
}
variable "aws_kube_master_size" {
description = "Instance size of Kube Master Nodes"
}
variable "aws_etcd_num" {
description = "Number of etcd Nodes"
}
variable "aws_etcd_disk_size" {
description = "Disk size for etcd Nodes (in GiB)"
}
variable "aws_etcd_size" {
description = "Instance size of etcd Nodes"
}
variable "aws_kube_worker_num" {
description = "Number of Kubernetes Worker Nodes"
}
variable "aws_kube_worker_disk_size" {
description = "Disk size for Kubernetes Worker Nodes (in GiB)"
}
variable "aws_kube_worker_size" {
description = "Instance size of Kubernetes Worker Nodes"
}
/*
* AWS NLB Settings
*
*/
variable "aws_nlb_api_port" {
description = "Port for AWS NLB"
}
variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "default_tags" {
description = "Default tags for all resources"
type = map(string)
}
variable "inventory_file" {
description = "Where to store the generated inventory file"
}

View File

@@ -0,0 +1,246 @@
# Kubernetes on Equinix Metal with Terraform
Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
[Equinix Metal](https://metal.equinix.com) ([formerly Packet](https://blog.equinix.com/blog/2020/10/06/equinix-metal-metal-and-more/)).
## Status
This will install a Kubernetes cluster on Equinix Metal. It should work in all locations and on most server types.
## Approach
The terraform configuration inspects variables found in
[variables.tf](variables.tf) to create resources in your Equinix Metal project.
There is a [python script](../terraform.py) that reads the generated `.tfstate`
file to generate a dynamic inventory that is consumed by [cluster.yml](../../../cluster.yml)
to actually install Kubernetes with Kubespray.
### Kubernetes Nodes
You can create many different kubernetes topologies by setting the number of
different classes of hosts.
- Master nodes with etcd: `number_of_k8s_masters` variable
- Master nodes without etcd: `number_of_k8s_masters_no_etcd` variable
- Standalone etcd hosts: `number_of_etcd` variable
- Kubernetes worker nodes: `number_of_k8s_nodes` variable
Note that the Ansible script will report an invalid configuration if you wind up
with an *even number* of etcd instances since that is not a valid configuration. This
restriction includes standalone etcd nodes that are deployed in a cluster along with
master nodes with etcd replicas. As an example, if you have three master nodes with
etcd replicas and three standalone etcd nodes, the script will fail since there are
now six total etcd replicas.
## Requirements
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- [Install Ansible dependencies](/docs/ansible.md#installing-ansible)
- Account with Equinix Metal
- An SSH key pair
## SSH Key Setup
An SSH key pair is required so Ansible can access the newly provisioned nodes (Equinix Metal hosts). By default, the public SSH key defined in `cluster.tfvars` (default: `~/.ssh/id_rsa.pub`) will be installed in `authorized_keys` on the newly provisioned nodes. Terraform will upload this public key and then distribute it to all the nodes. If you have already set this public key in Equinix Metal (i.e. via the portal), set the public key file name in `cluster.tfvars` to blank to prevent a duplicate key from being uploaded, which would cause an error.
If you don't already have a keypair generated (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub), then a new keypair can be generated with the command:
```ShellSession
ssh-keygen -f ~/.ssh/id_rsa
```
## Terraform
Terraform will be used to provision all of the Equinix Metal resources with base software as appropriate.
### Configuration
#### Inventory files
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
```ShellSession
cp -LRp contrib/terraform/equinix/sample-inventory inventory/$CLUSTER
cd inventory/$CLUSTER
ln -s ../../contrib/terraform/equinix/hosts
```
This will be the base for subsequent Terraform commands.
#### Equinix Metal API access
Your Equinix Metal API key must be available in the `METAL_AUTH_TOKEN` environment variable.
This key is typically stored outside of the code repo since it is considered secret.
If someone obtains this key, they can start up or shut down hosts in your project!
For more information on how to generate an API key or find your project ID, please see
[Accounts Index](https://metal.equinix.com/developers/docs/accounts/).
The Equinix Metal Project ID associated with the key will be set later in `cluster.tfvars`.
For more information about the API, please see [Equinix Metal API](https://metal.equinix.com/developers/api/).
For more information about terraform provider authentication, please see [the equinix provider documentation](https://registry.terraform.io/providers/equinix/equinix/latest/docs).
Example:
```ShellSession
export METAL_AUTH_TOKEN="Example-API-Token"
```
Note that to deploy several clusters within the same project you need to use [terraform workspace](https://www.terraform.io/docs/state/workspaces.html#using-workspaces).
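For example, a dedicated workspace per cluster can be created from `inventory/$CLUSTER` (after the `init` step described below):
```ShellSession
# Creates and switches to a new workspace named after the cluster.
terraform -chdir=../../contrib/terraform/equinix workspace new $CLUSTER
```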
#### Cluster variables
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).
For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
The `cluster_name` is used to set a tag on each server deployed as part of this cluster.
This helps when identifying which hosts are associated with each cluster.
While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:
- cluster_name = the name of the inventory directory created above as $CLUSTER
- equinix_metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above
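As a sketch, the two values can also be set non-interactively from the repository root (assumes GNU `sed`; replace the project ID placeholder with your real one):
```ShellSession
sed -i "s/^cluster_name = .*/cluster_name = \"$CLUSTER\"/" inventory/$CLUSTER/cluster.tfvars
sed -i 's/^equinix_metal_project_id = .*/equinix_metal_project_id = "Example-Project-Id"/' inventory/$CLUSTER/cluster.tfvars
```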
#### Enable localhost access
Kubespray will pull down a Kubernetes configuration file to access this cluster when
`kubeconfig_localhost: true` is enabled in the Kubespray configuration.
Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`, uncomment the following line, and change it from `false` to `true`:
`# kubeconfig_localhost: false`
becomes:
`kubeconfig_localhost: true`
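A sketch of the same edit as a one-liner, assuming GNU `sed` and the default commented line in the sample file:
```ShellSession
sed -i 's/^# kubeconfig_localhost: false/kubeconfig_localhost: true/' inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml
```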
Once the Kubespray playbooks are run, a Kubernetes configuration file will be written to the local host at `inventory/$CLUSTER/artifacts/admin.conf`
#### Terraform state files
In the cluster's inventory folder, the following files might be created (either by Terraform
or manually). To prevent you from accidentally pushing them, they are listed in a
`.gitignore` file in the `contrib/terraform/equinix` directory:
- `.terraform`
- `.tfvars`
- `.tfstate`
- `.tfstate.backup`
- `.lock.hcl`
You can still add them manually if you want to.
### Initialization
Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:
```ShellSession
cd inventory/$CLUSTER
terraform -chdir=../../contrib/terraform/equinix init -var-file=cluster.tfvars
```
This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
### Provisioning cluster
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
terraform -chdir=../../contrib/terraform/equinix apply -var-file=cluster.tfvars
export ANSIBLE_HOST_KEY_CHECKING=False
ansible-playbook -i hosts ../../cluster.yml
```
### Destroying cluster
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession
terraform -chdir=../../contrib/terraform/equinix destroy -var-file=cluster.tfvars
```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
- Remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
- Clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
### Debugging
You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` before running the Terraform command.
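For example:
```ShellSession
export TF_LOG=DEBUG
terraform -chdir=../../contrib/terraform/equinix plan -var-file=cluster.tfvars
unset TF_LOG
```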
## Ansible
### Node access
#### SSH
Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:
```ShellSession
eval $(ssh-agent -s)
ssh-add ~/.ssh/id_rsa
```
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
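For example, to remove a single stale entry (the IP address is illustrative):
```ShellSession
ssh-keygen -R 203.0.113.10
```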
#### Test access
Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
```ShellSession
$ ansible -i inventory/$CLUSTER/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
"changed": false,
"ping": "pong"
}
example-etcd-1 | SUCCESS => {
"changed": false,
"ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
"changed": false,
"ping": "pong"
}
```
If it fails try to connect manually via SSH. It could be something as simple as a stale host key.
### Deploy Kubernetes
```ShellSession
ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```
This will take some time as there are many tasks to run.
## Kubernetes
### Set up kubectl
- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost.
- Verify that Kubectl runs correctly
```ShellSession
kubectl version
```
- Verify that the Kubernetes configuration file has been copied over
```ShellSession
cat inventory/$CLUSTER/artifacts/admin.conf
```
- Verify that all the nodes are running correctly.
```ShellSession
kubectl version
kubectl --kubeconfig=inventory/$CLUSTER/artifacts/admin.conf get nodes
```
## What's next
Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).

View File

@@ -0,0 +1 @@
../terraform.py

View File

@@ -0,0 +1,57 @@
resource "equinix_metal_ssh_key" "k8s" {
count = var.public_key_path != "" ? 1 : 0
name = "kubernetes-${var.cluster_name}"
public_key = chomp(file(var.public_key_path))
}
resource "equinix_metal_device" "k8s_master" {
depends_on = [equinix_metal_ssh_key.k8s]
count = var.number_of_k8s_masters
hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
plan = var.plan_k8s_masters
metro = var.metro
operating_system = var.operating_system
billing_cycle = var.billing_cycle
project_id = var.equinix_metal_project_id
tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
}
resource "equinix_metal_device" "k8s_master_no_etcd" {
depends_on = [equinix_metal_ssh_key.k8s]
count = var.number_of_k8s_masters_no_etcd
hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
plan = var.plan_k8s_masters_no_etcd
metro = var.metro
operating_system = var.operating_system
billing_cycle = var.billing_cycle
project_id = var.equinix_metal_project_id
tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
}
resource "equinix_metal_device" "k8s_etcd" {
depends_on = [equinix_metal_ssh_key.k8s]
count = var.number_of_etcd
hostname = "${var.cluster_name}-etcd-${count.index + 1}"
plan = var.plan_etcd
metro = var.metro
operating_system = var.operating_system
billing_cycle = var.billing_cycle
project_id = var.equinix_metal_project_id
tags = ["cluster-${var.cluster_name}", "etcd"]
}
resource "equinix_metal_device" "k8s_node" {
depends_on = [equinix_metal_ssh_key.k8s]
count = var.number_of_k8s_nodes
hostname = "${var.cluster_name}-k8s-node-${count.index + 1}"
plan = var.plan_k8s_nodes
metro = var.metro
operating_system = var.operating_system
billing_cycle = var.billing_cycle
project_id = var.equinix_metal_project_id
tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
}

View File

@@ -0,0 +1,15 @@
output "k8s_masters" {
value = equinix_metal_device.k8s_master.*.access_public_ipv4
}
output "k8s_masters_no_etc" {
value = equinix_metal_device.k8s_master_no_etcd.*.access_public_ipv4
}
output "k8s_etcds" {
value = equinix_metal_device.k8s_etcd.*.access_public_ipv4
}
output "k8s_nodes" {
value = equinix_metal_device.k8s_node.*.access_public_ipv4
}

View File

@@ -0,0 +1,17 @@
terraform {
required_version = ">= 1.0.0"
provider_meta "equinix" {
module_name = "kubespray"
}
required_providers {
equinix = {
source = "equinix/equinix"
version = "1.24.0"
}
}
}
# Configure the Equinix Metal Provider
provider "equinix" {
}

View File

@@ -0,0 +1,35 @@
# your Kubernetes cluster name here
cluster_name = "mycluster"
# Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
equinix_metal_project_id = "Example-Project-Id"
# The public SSH key to be uploaded into authorized_keys in bare metal Equinix Metal nodes provisioned
# leave this value blank if the public key is already setup in the Equinix Metal project
# Terraform will complain if the public key is setup in Equinix Metal
public_key_path = "~/.ssh/id_rsa.pub"
# Equinix interconnected bare metal across our global metros.
metro = "da"
# operating_system
operating_system = "ubuntu_22_04"
# standalone etcds
number_of_etcd = 0
plan_etcd = "t1.small.x86"
# masters
number_of_k8s_masters = 1
number_of_k8s_masters_no_etcd = 0
plan_k8s_masters = "t1.small.x86"
plan_k8s_masters_no_etcd = "t1.small.x86"
# nodes
number_of_k8s_nodes = 2
plan_k8s_nodes = "t1.small.x86"

View File

@@ -0,0 +1 @@
../../../../inventory/sample/group_vars

View File

@@ -0,0 +1,56 @@
variable "cluster_name" {
default = "kubespray"
}
variable "equinix_metal_project_id" {
description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/"
}
variable "operating_system" {
default = "ubuntu_22_04"
}
variable "public_key_path" {
description = "The path of the ssh pub key"
default = "~/.ssh/id_rsa.pub"
}
variable "billing_cycle" {
default = "hourly"
}
variable "metro" {
default = "da"
}
variable "plan_k8s_masters" {
default = "c3.small.x86"
}
variable "plan_k8s_masters_no_etcd" {
default = "c3.small.x86"
}
variable "plan_etcd" {
default = "c3.small.x86"
}
variable "plan_k8s_nodes" {
default = "c3.medium.x86"
}
variable "number_of_k8s_masters" {
default = 1
}
variable "number_of_k8s_masters_no_etcd" {
default = 0
}
variable "number_of_etcd" {
default = 0
}
variable "number_of_k8s_nodes" {
default = 1
}

View File

@@ -0,0 +1,152 @@
# Kubernetes on Exoscale with Terraform
Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray
## Overview
The setup looks like the following:
```text
Kubernetes cluster
+-----------------------+
+---------------+ | +--------------+ |
| | | | +--------------+ |
| API server LB +---------> | | | |
| | | | | Master/etcd | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
| ^ |
| | |
| v |
+---------------+ | +--------------+ |
| | | | +--------------+ |
| Ingress LB +---------> | | | |
| | | | | Worker | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
+-----------------------+
```
## Requirements
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
## Quickstart
NOTE: *Assumes you are at the root of the kubespray repo*
Copy the sample inventory for your cluster and copy the default terraform variables.
```bash
CLUSTER=my-exoscale-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```
Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.
```bash
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
$EDITOR default.tfvars
```
For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`.
The file should look like something like this:
```ini
[cloudstack]
key = <API key>
secret = <API secret>
```
Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys.
### Encrypted credentials
To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime.
```bash
cat << EOF > cloudstack.ini
[cloudstack]
key =
secret =
EOF
sops --encrypt --in-place --pgp <PGP key fingerprint> cloudstack.ini
sops cloudstack.ini
```
Run terraform to create the infrastructure
```bash
terraform init ../../contrib/terraform/exoscale
terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale
```
If your cloudstack credentials file is encrypted using sops, run the following:
```bash
terraform init ../../contrib/terraform/exoscale
sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale'
```
You should now have an inventory file named `inventory.ini` that you can use with Kubespray to set up a cluster.
You can run `terraform output` to find out the IP addresses of the nodes, as well as the control-plane and data-plane load balancers.
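For example, to print the load-balancer addresses (output names as defined by this Terraform configuration):
```bash
terraform output control_plane_lb_ip_address
terraform output ingress_controller_lb_ip_address
```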
It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:
```bash
ansible -i inventory.ini -m ping all
```
Example to use this with the default sample inventory:
```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```
## Teardown
Since the Kubernetes cluster cannot create any load balancers or disks, teardown is as simple as running Terraform destroy:
```bash
terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
```
## Variables
### Required
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
* `node_type`: The role of this node *(master|worker)*
* `size`: The size to use
* `boot_disk`: The boot disk to use
* `image_name`: Name of the image
* `root_partition_size`: Size *(in GB)* for the root partition
* `ceph_partition_size`: Size *(in GB)* for the partition for rook to use as ceph storage. *(Set to 0 to disable)*
* `node_local_partition_size`: Size *(in GB)* for the partition for node-local-storage. *(Set to 0 to disable)*
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
### Optional
* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
An example variables file can be found in `default.tfvars`.
## Known limitations
### Only single disk
Since Exoscale doesn't support mounting additional disks onto an instance, this script can instead create partitions for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local).
### No Kubernetes API
The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager).
This means that we need to set up an HTTP(S) load balancer in front of all workers and run the Ingress controller as a DaemonSet; one possible approach is sketched below.
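One possible approach (an assumption, not part of this Terraform configuration) is to deploy the upstream ingress-nginx Helm chart as a host-network DaemonSet on the workers, so the Elastic IP health check on port 80 can reach every node:
```bash
# Sketch only: assumes Helm and the upstream ingress-nginx chart are available; adapt to your cluster.
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.kind=DaemonSet \
  --set controller.hostNetwork=true
```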

View File

@@ -0,0 +1,65 @@
prefix = "default"
zone = "ch-gva-2"
inventory_file = "inventory.ini"
ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]
machines = {
"master-0" : {
"node_type" : "master",
"size" : "standard.medium",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-0" : {
"node_type" : "worker",
"size" : "standard.large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-1" : {
"node_type" : "worker",
"size" : "standard.large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-2" : {
"node_type" : "worker",
"size" : "standard.large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
}
}
nodeport_whitelist = [
"0.0.0.0/0"
]
ssh_whitelist = [
"0.0.0.0/0"
]
api_server_whitelist = [
"0.0.0.0/0"
]

View File

@@ -0,0 +1,49 @@
provider "exoscale" {}
module "kubernetes" {
source = "./modules/kubernetes-cluster"
prefix = var.prefix
zone = var.zone
machines = var.machines
ssh_public_keys = var.ssh_public_keys
ssh_whitelist = var.ssh_whitelist
api_server_whitelist = var.api_server_whitelist
nodeport_whitelist = var.nodeport_whitelist
}
#
# Generate ansible inventory
#
data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl")
vars = {
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
keys(module.kubernetes.master_ip_addresses),
values(module.kubernetes.master_ip_addresses).*.public_ip,
values(module.kubernetes.master_ip_addresses).*.private_ip,
range(1, length(module.kubernetes.master_ip_addresses) + 1)))
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
keys(module.kubernetes.worker_ip_addresses),
values(module.kubernetes.worker_ip_addresses).*.public_ip,
values(module.kubernetes.worker_ip_addresses).*.private_ip))
list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
}
}
resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}
triggers = {
template = data.template_file.inventory.rendered
}
}

View File

@@ -0,0 +1,191 @@
data "exoscale_template" "os_image" {
for_each = var.machines
zone = var.zone
name = each.value.boot_disk.image_name
}
data "exoscale_compute_instance" "master_nodes" {
for_each = exoscale_compute_instance.master
id = each.value.id
zone = var.zone
}
data "exoscale_compute_instance" "worker_nodes" {
for_each = exoscale_compute_instance.worker
id = each.value.id
zone = var.zone
}
resource "exoscale_private_network" "private_network" {
zone = var.zone
name = "${var.prefix}-network"
start_ip = cidrhost(var.private_network_cidr, 1)
# cidr -1 = Broadcast address
# cidr -2 = DHCP server address (exoscale specific)
end_ip = cidrhost(var.private_network_cidr, -3)
netmask = cidrnetmask(var.private_network_cidr)
}
resource "exoscale_compute_instance" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}
name = "${var.prefix}-${each.key}"
template_id = data.exoscale_template.os_image[each.key].id
type = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_group_ids = [exoscale_security_group.master_sg.id]
network_interface {
network_id = exoscale_private_network.private_network.id
}
elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
node_type = "master"
ssh_public_keys = var.ssh_public_keys
}
)
}
resource "exoscale_compute_instance" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}
name = "${var.prefix}-${each.key}"
template_id = data.exoscale_template.os_image[each.key].id
type = each.value.size
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
state = "Running"
zone = var.zone
security_group_ids = [exoscale_security_group.worker_sg.id]
network_interface {
network_id = exoscale_private_network.private_network.id
}
elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
eip_ip_address = exoscale_elastic_ip.ingress_controller_lb.ip_address
node_local_partition_size = each.value.boot_disk.node_local_partition_size
ceph_partition_size = each.value.boot_disk.ceph_partition_size
root_partition_size = each.value.boot_disk.root_partition_size
node_type = "worker"
ssh_public_keys = var.ssh_public_keys
}
)
}
resource "exoscale_security_group" "master_sg" {
name = "${var.prefix}-master-sg"
description = "Security group for Kubernetes masters"
}
resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
security_group_id = exoscale_security_group.master_sg.id
for_each = toset(var.ssh_whitelist)
# SSH
type = "INGRESS"
start_port = 22
end_port = 22
protocol = "TCP"
cidr = each.value
}
resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
security_group_id = exoscale_security_group.master_sg.id
for_each = toset(var.api_server_whitelist)
# Kubernetes API
type = "INGRESS"
start_port = 6443
end_port = 6443
protocol = "TCP"
cidr = each.value
}
resource "exoscale_security_group" "worker_sg" {
name = "${var.prefix}-worker-sg"
description = "security group for kubernetes worker nodes"
}
resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
security_group_id = exoscale_security_group.worker_sg.id
# SSH
for_each = toset(var.ssh_whitelist)
type = "INGRESS"
start_port = 22
end_port = 22
protocol = "TCP"
cidr = each.value
}
resource "exoscale_security_group_rule" "worker_sg_rule_http" {
security_group_id = exoscale_security_group.worker_sg.id
# HTTP(S)
for_each = toset(["80", "443"])
type = "INGRESS"
start_port = each.value
end_port = each.value
protocol = "TCP"
cidr = "0.0.0.0/0"
}
resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
security_group_id = exoscale_security_group.worker_sg.id
# HTTP(S)
for_each = toset(var.nodeport_whitelist)
type = "INGRESS"
start_port = 30000
end_port = 32767
protocol = "TCP"
cidr = each.value
}
resource "exoscale_elastic_ip" "ingress_controller_lb" {
zone = var.zone
healthcheck {
mode = "http"
port = 80
uri = "/healthz"
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
}
}
resource "exoscale_elastic_ip" "control_plane_lb" {
zone = var.zone
healthcheck {
mode = "tcp"
port = 6443
interval = 10
timeout = 2
strikes_ok = 2
strikes_fail = 3
}
}

View File

@@ -0,0 +1,31 @@
output "master_ip_addresses" {
value = {
for key, instance in exoscale_compute_instance.master :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute_instance.master[key].ip_address
}
}
}
output "worker_ip_addresses" {
value = {
for key, instance in exoscale_compute_instance.worker :
instance.name => {
"private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
"public_ip" = exoscale_compute_instance.worker[key].ip_address
}
}
}
output "cluster_private_network_cidr" {
value = var.private_network_cidr
}
output "ingress_controller_lb_ip_address" {
value = exoscale_elastic_ip.ingress_controller_lb.ip_address
}
output "control_plane_lb_ip_address" {
value = exoscale_elastic_ip.control_plane_lb.ip_address
}

View File

@@ -0,0 +1,52 @@
#cloud-config
%{ if ceph_partition_size > 0 || node_local_partition_size > 0}
bootcmd:
- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ]
%{ if node_local_partition_size > 0 }
# Create partition for node local storage
- [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ]
- [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ]
%{ endif }
%{ if ceph_partition_size > 0 }
# Create partition for rook to use for ceph
- [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ]
%{ endif }
%{ endif }
ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
- ${ssh_public_key}
%{ endfor ~}
write_files:
- path: /etc/netplan/eth1.yaml
content: |
network:
version: 2
ethernets:
eth1:
dhcp4: true
%{ if node_type == "worker" }
# TODO: When a VM is seen as healthy and is added to the EIP loadbalancer
# pool it no longer can send traffic back to itself via the EIP IP
# address.
# Remove this if it ever gets solved.
- path: /etc/netplan/20-eip-fix.yaml
content: |
network:
version: 2
ethernets:
"lo:0":
match:
name: lo
dhcp4: false
addresses:
- ${eip_ip_address}/32
%{ endif }
runcmd:
- netplan apply
%{ if node_local_partition_size > 0 }
- mkdir -p /mnt/disks/node-local-storage
- chown nobody:nogroup /mnt/disks/node-local-storage
- mount /dev/vda2 /mnt/disks/node-local-storage
%{ endif }

View File

@@ -0,0 +1,42 @@
variable "zone" {
type = string
# This is currently the only zone that is supposed to support
# so-called "managed private networks".
# See: https://www.exoscale.com/syslog/introducing-managed-private-networks
default = "ch-gva-2"
}
variable "prefix" {}
variable "machines" {
type = map(object({
node_type = string
size = string
boot_disk = object({
image_name = string
root_partition_size = number
ceph_partition_size = number
node_local_partition_size = number
})
}))
}
variable "ssh_public_keys" {
type = list(string)
}
variable "ssh_whitelist" {
type = list(string)
}
variable "api_server_whitelist" {
type = list(string)
}
variable "nodeport_whitelist" {
type = list(string)
}
variable "private_network_cidr" {
default = "172.0.10.0/24"
}

View File

@@ -0,0 +1,9 @@
terraform {
required_providers {
exoscale = {
source = "exoscale/exoscale"
version = ">= 0.21"
}
}
required_version = ">= 0.13"
}

View File

@@ -0,0 +1,15 @@
output "master_ips" {
value = module.kubernetes.master_ip_addresses
}
output "worker_ips" {
value = module.kubernetes.worker_ip_addresses
}
output "ingress_controller_lb_ip_address" {
value = module.kubernetes.ingress_controller_lb_ip_address
}
output "control_plane_lb_ip_address" {
value = module.kubernetes.control_plane_lb_ip_address
}

View File

@@ -0,0 +1,65 @@
prefix = "default"
zone = "ch-gva-2"
inventory_file = "inventory.ini"
ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]
machines = {
"master-0" : {
"node_type" : "master",
"size" : "Small",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-0" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-1" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
},
"worker-2" : {
"node_type" : "worker",
"size" : "Large",
"boot_disk" : {
"image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
"root_partition_size" : 50,
"node_local_partition_size" : 0,
"ceph_partition_size" : 0
}
}
}
nodeport_whitelist = [
"0.0.0.0/0"
]
ssh_whitelist = [
"0.0.0.0/0"
]
api_server_whitelist = [
"0.0.0.0/0"
]

View File

@@ -0,0 +1 @@
../../../../inventory/sample/group_vars

View File

@@ -0,0 +1,19 @@
[all]
${connection_strings_master}
${connection_strings_worker}
[kube_control_plane]
${list_master}
[kube_control_plane:vars]
supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
[etcd]
${list_master}
[kube_node]
${list_worker}
[k8s_cluster:children]
kube_control_plane
kube_node

View File

@@ -0,0 +1,46 @@
variable "zone" {
description = "The zone where to run the cluster"
}
variable "prefix" {
description = "Prefix for resource names"
default = "default"
}
variable "machines" {
description = "Cluster machines"
type = map(object({
node_type = string
size = string
boot_disk = object({
image_name = string
root_partition_size = number
ceph_partition_size = number
node_local_partition_size = number
})
}))
}
variable "ssh_public_keys" {
description = "List of public SSH keys which are injected into the VMs."
type = list(string)
}
variable "ssh_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for ssh"
type = list(string)
}
variable "api_server_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
type = list(string)
}
variable "nodeport_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
type = list(string)
}
variable "inventory_file" {
description = "Where to store the generated inventory file"
}

View File

@@ -0,0 +1,15 @@
terraform {
required_providers {
exoscale = {
source = "exoscale/exoscale"
version = ">= 0.21"
}
null = {
source = "hashicorp/null"
}
template = {
source = "hashicorp/template"
}
}
required_version = ">= 0.13"
}

View File

@@ -0,0 +1,104 @@
# Kubernetes on GCP with Terraform
Provision a Kubernetes cluster on GCP using Terraform and Kubespray
## Overview
The setup looks like the following:
```text
Kubernetes cluster
+-----------------------+
+---------------+ | +--------------+ |
| | | | +--------------+ |
| API server LB +---------> | | | |
| | | | | Master/etcd | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
| ^ |
| | |
| v |
+---------------+ | +--------------+ |
| | | | +--------------+ |
| Ingress LB +---------> | | | |
| | | | | Worker | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------+ |
+-----------------------+
```
## Requirements
* Terraform 0.12.0 or newer
## Quickstart
To get a cluster up and running you'll need a JSON keyfile.
Set the path to the file in the `tfvars.json` file and run the following:
```bash
terraform apply -var-file tfvars.json -state dev-cluster.tfstate -var gcp_project_id=<ID of your GCP project> -var keyfile_location=<location of the json keyfile>
```
To generate kubespray inventory based on the terraform state file you can run the following:
```bash
./generate-inventory.sh dev-cluster.tfstate > inventory.ini
```
You should now have an inventory file named `inventory.ini` that you can use with kubespray, e.g.
```bash
ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
```
## Variables
### Required
* `keyfile_location`: Location to the keyfile to use as credentials for the google terraform provider
* `gcp_project_id`: ID of the GCP project to deploy the cluster in
* `ssh_pub_key`: Path to public ssh key to use for all machines
* `region`: The region where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
* `node_type`: The role of this node *(master|worker)*
* `size`: The size to use
* `zone`: The zone the machine should run in
* `additional_disks`: Extra disks to add to the machine. Key of this object will be used as the disk name
* `size`: Size of the disk (in GB)
* `boot_disk`: The boot disk to use
* `image_name`: Name of the image
* `size`: Size of the boot disk (in GB)
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
* `extra_ingress_firewalls`: Additional ingress firewall rules. Key will be used as the name of the rule
* `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
* `protocol`: Protocol. Example `"tcp"`
* `ports`: List of ports, as string. Example `["53"]`
* `target_tags`: List of target tags (either the machine name or `control-plane` or `worker`). Example: `["control-plane", "worker-0"]`
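For illustration, a minimal sketch of an `extra_ingress_firewalls` entry in HCL `terraform.tfvars` syntax; the rule name, source range, port, and tag below are placeholder values and are not part of the shipped `tfvars.json`:

```hcl
extra_ingress_firewalls = {
  # Hypothetical rule: allow DNS (tcp/53) from the private subnet to worker nodes.
  "allow-dns" = {
    source_ranges = ["10.0.10.0/24"]
    protocol      = "tcp"
    ports         = ["53"]
    target_tags   = ["worker"]
  }
}
```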
### Optional
* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
* `master_sa_email`: Service account email to use for the control plane nodes *(Defaults to `""`, auto generate one)*
* `master_sa_scopes`: Service account scopes to use for the control plane nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `master_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
for the control plane nodes *(Defaults to `false`)*
* `master_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
for extra disks added on the control plane nodes *(Defaults to `"pd-ssd"`)*
* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)*
* `worker_sa_scopes`: Service account scopes to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `worker_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
for the worker nodes *(Defaults to `false`)*
* `worker_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
for extra disks added on the worker nodes *(Defaults to `"pd-ssd"`)*
An example variables file can be found in `tfvars.json`
## Known limitations
This solution does not provide a bastion host, so all nodes must expose a public IP for kubespray to work.

View File

@@ -0,0 +1,76 @@
#!/bin/bash
#
# Generates an inventory file based on the terraform output.
# After provisioning a cluster, simply run this command and supply the terraform state file
# Default state file is terraform.tfstate
#
set -e
usage () {
echo "Usage: $0 <state file>" >&2
exit 1
}
if [[ $# -ne 1 ]]; then
usage
fi
TF_STATE_FILE=${1}
if [[ ! -f "${TF_STATE_FILE}" ]]; then
echo "ERROR: state file ${TF_STATE_FILE} doesn't exist" >&2
usage
fi
TF_OUT=$(terraform output -state "${TF_STATE_FILE}" -json)
MASTERS=$(jq -r '.master_ips.value | to_entries[]' <(echo "${TF_OUT}"))
WORKERS=$(jq -r '.worker_ips.value | to_entries[]' <(echo "${TF_OUT}"))
mapfile -t MASTER_NAMES < <(jq -r '.key' <(echo "${MASTERS}"))
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
API_LB=$(jq -r '.control_plane_lb_ip_address.value' <(echo "${TF_OUT}"))
# Generate master hosts
i=1
for name in "${MASTER_NAMES[@]}"; do
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${MASTERS}"))
public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${MASTERS}"))
echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip} etcd_member_name=etcd${i}"
i=$(( i + 1 ))
done
# Generate worker hosts
for name in "${WORKER_NAMES[@]}"; do
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${WORKERS}"))
echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip}"
done
echo ""
echo "[kube_control_plane]"
for name in "${MASTER_NAMES[@]}"; do
echo "${name}"
done
echo ""
echo "[kube_control_plane:vars]"
echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
echo ""
echo "[etcd]"
for name in "${MASTER_NAMES[@]}"; do
echo "${name}"
done
echo ""
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
echo "${name}"
done
echo ""
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"

View File

@@ -0,0 +1,39 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "~> 4.0"
}
}
}
provider "google" {
credentials = file(var.keyfile_location)
region = var.region
project = var.gcp_project_id
}
module "kubernetes" {
source = "./modules/kubernetes-cluster"
region = var.region
prefix = var.prefix
machines = var.machines
ssh_pub_key = var.ssh_pub_key
master_sa_email = var.master_sa_email
master_sa_scopes = var.master_sa_scopes
master_preemptible = var.master_preemptible
master_additional_disk_type = var.master_additional_disk_type
worker_sa_email = var.worker_sa_email
worker_sa_scopes = var.worker_sa_scopes
worker_preemptible = var.worker_preemptible
worker_additional_disk_type = var.worker_additional_disk_type
ssh_whitelist = var.ssh_whitelist
api_server_whitelist = var.api_server_whitelist
nodeport_whitelist = var.nodeport_whitelist
ingress_whitelist = var.ingress_whitelist
extra_ingress_firewalls = var.extra_ingress_firewalls
}

View File

@@ -0,0 +1,421 @@
#################################################
##
## General
##
resource "google_compute_network" "main" {
name = "${var.prefix}-network"
auto_create_subnetworks = false
}
resource "google_compute_subnetwork" "main" {
name = "${var.prefix}-subnet"
network = google_compute_network.main.name
ip_cidr_range = var.private_network_cidr
region = var.region
}
resource "google_compute_firewall" "deny_all" {
name = "${var.prefix}-default-firewall"
network = google_compute_network.main.name
priority = 1000
source_ranges = ["0.0.0.0/0"]
deny {
protocol = "all"
}
}
resource "google_compute_firewall" "allow_internal" {
name = "${var.prefix}-internal-firewall"
network = google_compute_network.main.name
priority = 500
source_ranges = [var.private_network_cidr]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "ssh" {
count = length(var.ssh_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-ssh-firewall"
network = google_compute_network.main.name
priority = 100
source_ranges = var.ssh_whitelist
allow {
protocol = "tcp"
ports = ["22"]
}
}
resource "google_compute_firewall" "api_server" {
count = length(var.api_server_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-api-server-firewall"
network = google_compute_network.main.name
priority = 100
source_ranges = var.api_server_whitelist
allow {
protocol = "tcp"
ports = ["6443"]
}
}
resource "google_compute_firewall" "nodeport" {
count = length(var.nodeport_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-nodeport-firewall"
network = google_compute_network.main.name
priority = 100
source_ranges = var.nodeport_whitelist
allow {
protocol = "tcp"
ports = ["30000-32767"]
}
}
resource "google_compute_firewall" "ingress_http" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-http-ingress-firewall"
network = google_compute_network.main.name
priority = 100
source_ranges = var.ingress_whitelist
allow {
protocol = "tcp"
ports = ["80"]
}
}
resource "google_compute_firewall" "ingress_https" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-https-ingress-firewall"
network = google_compute_network.main.name
priority = 100
source_ranges = var.ingress_whitelist
allow {
protocol = "tcp"
ports = ["443"]
}
}
#################################################
##
## Local variables
##
locals {
master_target_list = [
for name, machine in google_compute_instance.master :
"${machine.zone}/${machine.name}"
]
worker_target_list = [
for name, machine in google_compute_instance.worker :
"${machine.zone}/${machine.name}"
]
master_disks = flatten([
for machine_name, machine in var.machines : [
for disk_name, disk in machine.additional_disks : {
"${machine_name}-${disk_name}" = {
"machine_name": machine_name,
"machine": machine,
"disk_size": disk.size,
"disk_name": disk_name
}
}
]
if machine.node_type == "master"
])
worker_disks = flatten([
for machine_name, machine in var.machines : [
for disk_name, disk in machine.additional_disks : {
"${machine_name}-${disk_name}" = {
"machine_name": machine_name,
"machine": machine,
"disk_size": disk.size,
"disk_name": disk_name
}
}
]
if machine.node_type == "worker"
])
}
#################################################
##
## Master
##
resource "google_compute_address" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}
name = "${var.prefix}-${each.key}-pip"
address_type = "EXTERNAL"
region = var.region
}
resource "google_compute_disk" "master" {
for_each = {
for item in local.master_disks :
keys(item)[0] => values(item)[0]
}
name = "${var.prefix}-${each.key}"
type = var.master_additional_disk_type
zone = each.value.machine.zone
size = each.value.disk_size
physical_block_size_bytes = 4096
}
resource "google_compute_attached_disk" "master" {
for_each = {
for item in local.master_disks :
keys(item)[0] => values(item)[0]
}
disk = google_compute_disk.master[each.key].id
instance = google_compute_instance.master[each.value.machine_name].id
}
resource "google_compute_instance" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}
name = "${var.prefix}-${each.key}"
machine_type = each.value.size
zone = each.value.zone
tags = ["control-plane", "master", each.key]
boot_disk {
initialize_params {
image = each.value.boot_disk.image_name
size = each.value.boot_disk.size
}
}
network_interface {
subnetwork = google_compute_subnetwork.main.name
access_config {
nat_ip = google_compute_address.master[each.key].address
}
}
metadata = {
ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}"
}
service_account {
email = var.master_sa_email
scopes = var.master_sa_scopes
}
# Since we use google_compute_attached_disk we need to ignore this
lifecycle {
ignore_changes = [attached_disk]
}
scheduling {
preemptible = var.master_preemptible
automatic_restart = !var.master_preemptible
}
}
resource "google_compute_forwarding_rule" "master_lb" {
count = length(var.api_server_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-master-lb-forward-rule"
port_range = "6443"
target = google_compute_target_pool.master_lb[count.index].id
}
resource "google_compute_target_pool" "master_lb" {
count = length(var.api_server_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-master-lb-pool"
instances = local.master_target_list
}
#################################################
##
## Worker
##
resource "google_compute_disk" "worker" {
for_each = {
for item in local.worker_disks :
keys(item)[0] => values(item)[0]
}
name = "${var.prefix}-${each.key}"
type = var.worker_additional_disk_type
zone = each.value.machine.zone
size = each.value.disk_size
physical_block_size_bytes = 4096
}
resource "google_compute_attached_disk" "worker" {
for_each = {
for item in local.worker_disks :
keys(item)[0] => values(item)[0]
}
disk = google_compute_disk.worker[each.key].id
instance = google_compute_instance.worker[each.value.machine_name].id
}
resource "google_compute_address" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}
name = "${var.prefix}-${each.key}-pip"
address_type = "EXTERNAL"
region = var.region
}
resource "google_compute_instance" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}
name = "${var.prefix}-${each.key}"
machine_type = each.value.size
zone = each.value.zone
tags = ["worker", each.key]
boot_disk {
initialize_params {
image = each.value.boot_disk.image_name
size = each.value.boot_disk.size
}
}
network_interface {
subnetwork = google_compute_subnetwork.main.name
access_config {
nat_ip = google_compute_address.worker[each.key].address
}
}
metadata = {
ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}"
}
service_account {
email = var.worker_sa_email
scopes = var.worker_sa_scopes
}
# Since we use google_compute_attached_disk we need to ignore this
lifecycle {
ignore_changes = [attached_disk]
}
scheduling {
preemptible = var.worker_preemptible
automatic_restart = !var.worker_preemptible
}
}
resource "google_compute_address" "worker_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-worker-lb-address"
address_type = "EXTERNAL"
region = var.region
}
resource "google_compute_forwarding_rule" "worker_http_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-worker-http-lb-forward-rule"
ip_address = google_compute_address.worker_lb[count.index].address
port_range = "80"
target = google_compute_target_pool.worker_lb[count.index].id
}
resource "google_compute_forwarding_rule" "worker_https_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-worker-https-lb-forward-rule"
ip_address = google_compute_address.worker_lb[count.index].address
port_range = "443"
target = google_compute_target_pool.worker_lb[count.index].id
}
resource "google_compute_target_pool" "worker_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0
name = "${var.prefix}-worker-lb-pool"
instances = local.worker_target_list
}
resource "google_compute_firewall" "extra_ingress_firewall" {
for_each = {
for name, firewall in var.extra_ingress_firewalls :
name => firewall
}
name = "${var.prefix}-${each.key}-ingress"
network = google_compute_network.main.name
priority = 100
source_ranges = each.value.source_ranges
target_tags = each.value.target_tags
allow {
protocol = each.value.protocol
ports = each.value.ports
}
}

View File

@@ -0,0 +1,27 @@
output "master_ip_addresses" {
value = {
for key, instance in google_compute_instance.master :
instance.name => {
"private_ip" = instance.network_interface.0.network_ip
"public_ip" = instance.network_interface.0.access_config.0.nat_ip
}
}
}
output "worker_ip_addresses" {
value = {
for key, instance in google_compute_instance.worker :
instance.name => {
"private_ip" = instance.network_interface.0.network_ip
"public_ip" = instance.network_interface.0.access_config.0.nat_ip
}
}
}
output "ingress_controller_lb_ip_address" {
value = length(var.ingress_whitelist) > 0 ? google_compute_address.worker_lb.0.address : ""
}
output "control_plane_lb_ip_address" {
value = length(var.api_server_whitelist) > 0 ? google_compute_forwarding_rule.master_lb.0.ip_address : ""
}

View File

@@ -0,0 +1,86 @@
variable "region" {
type = string
}
variable "prefix" {}
variable "machines" {
type = map(object({
node_type = string
size = string
zone = string
additional_disks = map(object({
size = number
}))
boot_disk = object({
image_name = string
size = number
})
}))
}
variable "master_sa_email" {
type = string
}
variable "master_sa_scopes" {
type = list(string)
}
variable "master_preemptible" {
type = bool
}
variable "master_additional_disk_type" {
type = string
}
variable "worker_sa_email" {
type = string
}
variable "worker_sa_scopes" {
type = list(string)
}
variable "worker_preemptible" {
type = bool
}
variable "worker_additional_disk_type" {
type = string
}
variable "ssh_pub_key" {}
variable "ssh_whitelist" {
type = list(string)
}
variable "api_server_whitelist" {
type = list(string)
}
variable "nodeport_whitelist" {
type = list(string)
}
variable "ingress_whitelist" {
type = list(string)
default = ["0.0.0.0/0"]
}
variable "private_network_cidr" {
default = "10.0.10.0/24"
}
variable "extra_ingress_firewalls" {
type = map(object({
source_ranges = set(string)
protocol = string
ports = list(string)
target_tags = set(string)
}))
default = {}
}

View File

@@ -0,0 +1,15 @@
output "master_ips" {
value = module.kubernetes.master_ip_addresses
}
output "worker_ips" {
value = module.kubernetes.worker_ip_addresses
}
output "ingress_controller_lb_ip_address" {
value = module.kubernetes.ingress_controller_lb_ip_address
}
output "control_plane_lb_ip_address" {
value = module.kubernetes.control_plane_lb_ip_address
}

View File

@@ -0,0 +1,63 @@
{
"gcp_project_id": "GCP_PROJECT_ID",
"region": "us-central1",
"ssh_pub_key": "~/.ssh/id_rsa.pub",
"keyfile_location": "service-account.json",
"prefix": "development",
"ssh_whitelist": [
"1.2.3.4/32"
],
"api_server_whitelist": [
"1.2.3.4/32"
],
"nodeport_whitelist": [
"1.2.3.4/32"
],
"ingress_whitelist": [
"0.0.0.0/0"
],
"machines": {
"master-0": {
"node_type": "master",
"size": "n1-standard-2",
"zone": "us-central1-a",
"additional_disks": {},
"boot_disk": {
"image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
"size": 50
}
},
"worker-0": {
"node_type": "worker",
"size": "n1-standard-8",
"zone": "us-central1-a",
"additional_disks": {
"extra-disk-1": {
"size": 100
}
},
"boot_disk": {
"image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
"size": 50
}
},
"worker-1": {
"node_type": "worker",
"size": "n1-standard-8",
"zone": "us-central1-a",
"additional_disks": {
"extra-disk-1": {
"size": 100
}
},
"boot_disk": {
"image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
"size": 50
}
}
}
}

View File

@@ -0,0 +1,108 @@
variable keyfile_location {
description = "Location of the json keyfile to use with the google provider"
type = string
}
variable region {
description = "Region of all resources"
type = string
}
variable gcp_project_id {
description = "ID of the project"
type = string
}
variable prefix {
description = "Prefix for resource names"
default = "default"
}
variable machines {
description = "Cluster machines"
type = map(object({
node_type = string
size = string
zone = string
additional_disks = map(object({
size = number
}))
boot_disk = object({
image_name = string
size = number
})
}))
}
variable "master_sa_email" {
type = string
default = ""
}
variable "master_sa_scopes" {
type = list(string)
default = ["https://www.googleapis.com/auth/cloud-platform"]
}
variable "master_preemptible" {
type = bool
default = false
}
variable "master_additional_disk_type" {
type = string
default = "pd-ssd"
}
variable "worker_sa_email" {
type = string
default = ""
}
variable "worker_sa_scopes" {
type = list(string)
default = ["https://www.googleapis.com/auth/cloud-platform"]
}
variable "worker_preemptible" {
type = bool
default = false
}
variable "worker_additional_disk_type" {
type = string
default = "pd-ssd"
}
variable ssh_pub_key {
description = "Path to public SSH key file which is injected into the VMs."
type = string
}
variable ssh_whitelist {
type = list(string)
}
variable api_server_whitelist {
type = list(string)
}
variable nodeport_whitelist {
type = list(string)
}
variable "ingress_whitelist" {
type = list(string)
default = ["0.0.0.0/0"]
}
variable "extra_ingress_firewalls" {
type = map(object({
source_ranges = set(string)
protocol = string
ports = list(string)
target_tags = set(string)
}))
default = {}
}

View File

@@ -0,0 +1 @@
../../inventory/local/group_vars

View File

@@ -0,0 +1,121 @@
# Kubernetes on Hetzner with Terraform
Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray
## Overview
The setup looks like the following:
```text
Kubernetes cluster
+--------------------------+
| +--------------+ |
| | +--------------+ |
| --> | | | |
| | | Master/etcd | |
| | | node(s) | |
| +-+ | |
| +--------------+ |
| ^ |
| | |
| v |
| +--------------+ |
| | +--------------+ |
| --> | | | |
| | | Worker | |
| | | node(s) | |
| +-+ | |
| +--------------+ |
+--------------------------+
```
The nodes use a private network for node-to-node communication and a public interface for all external communication.
## Requirements
* Terraform 0.14.0 or newer
## Quickstart
NOTE: Assumes you are at the root of the kubespray repo.
For authentication in your cluster you can use environment variables:
```bash
export HCLOUD_TOKEN=api-token
```
Copy the cluster configuration file.
```bash
CLUSTER=my-hetzner-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```
Edit `default.tfvars` to match your requirements.
To use Flatcar Container Linux instead of the basic Hetzner images, switch back to the Terraform directory:
```bash
cd ../../contrib/terraform/hetzner
```
Edit `main.tf`: activate the module line `source = "./modules/kubernetes-cluster-flatcar"` and
comment out `source = "./modules/kubernetes-cluster"`.
Also activate `ssh_private_key_path = var.ssh_private_key_path`. The VM boots into
rescue mode with the image selected in `var.machines` but installs Flatcar instead; a sketch of the edited block follows.
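This is roughly what the edited `module "kubernetes"` block in `main.tf` looks like with the Flatcar module active; only the `source` and `ssh_private_key_path` lines differ from the shipped file:

```hcl
module "kubernetes" {
  # source = "./modules/kubernetes-cluster"
  source = "./modules/kubernetes-cluster-flatcar"

  prefix   = var.prefix
  zone     = var.zone
  machines = var.machines

  # only needed by the flatcar module
  ssh_private_key_path = var.ssh_private_key_path

  ssh_public_keys      = var.ssh_public_keys
  network_zone         = var.network_zone
  ssh_whitelist        = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
  ingress_whitelist    = var.ingress_whitelist
}
```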
Run Terraform to create the infrastructure.
```bash
cd ./kubespray
terraform -chdir=./contrib/terraform/hetzner/ init
terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars
```
You should now have an inventory file named `inventory.ini` that you can use with kubespray to set up a cluster.
It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:
```bash
ansible -i inventory.ini -m ping all
```
You can setup Kubernetes with kubespray using the generated inventory:
```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```
## Cloud controller
For better support with the cloud you can install the [hcloud cloud controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and [CSI driver](https://github.com/hetznercloud/csi-driver).
Please read the instructions in both repos on how to install it.
## Teardown
You can teardown your infrastructure using the following Terraform command:
```bash
terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner
```
## Variables
* `prefix`: Prefix to add to all resources; if set to `""`, no prefix is added
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `network_zone`: the network zone where the cluster is running
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
* `node_type`: The role of this node *(master|worker)*
* `size`: Size of the VM
* `image`: The image to use for the VM
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to kubernetes workers on port 80 and 443

View File

@@ -0,0 +1,46 @@
prefix = "default"
zone = "hel1"
network_zone = "eu-central"
inventory_file = "inventory.ini"
ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]
ssh_private_key_path = "~/.ssh/id_rsa"
machines = {
"master-0" : {
"node_type" : "master",
"size" : "cx21",
"image" : "ubuntu-22.04",
},
"worker-0" : {
"node_type" : "worker",
"size" : "cx21",
"image" : "ubuntu-22.04",
},
"worker-1" : {
"node_type" : "worker",
"size" : "cx21",
"image" : "ubuntu-22.04",
}
}
nodeport_whitelist = [
"0.0.0.0/0"
]
ingress_whitelist = [
"0.0.0.0/0"
]
ssh_whitelist = [
"0.0.0.0/0"
]
api_server_whitelist = [
"0.0.0.0/0"
]

View File

@@ -0,0 +1,57 @@
provider "hcloud" {}
module "kubernetes" {
source = "./modules/kubernetes-cluster"
# source = "./modules/kubernetes-cluster-flatcar"
prefix = var.prefix
zone = var.zone
machines = var.machines
#only for flatcar
#ssh_private_key_path = var.ssh_private_key_path
ssh_public_keys = var.ssh_public_keys
network_zone = var.network_zone
ssh_whitelist = var.ssh_whitelist
api_server_whitelist = var.api_server_whitelist
nodeport_whitelist = var.nodeport_whitelist
ingress_whitelist = var.ingress_whitelist
}
#
# Generate ansible inventory
#
locals {
inventory = templatefile(
"${path.module}/templates/inventory.tpl",
{
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
keys(module.kubernetes.master_ip_addresses),
values(module.kubernetes.master_ip_addresses).*.public_ip,
values(module.kubernetes.master_ip_addresses).*.private_ip,
range(1, length(module.kubernetes.master_ip_addresses) + 1)))
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
keys(module.kubernetes.worker_ip_addresses),
values(module.kubernetes.worker_ip_addresses).*.public_ip,
values(module.kubernetes.worker_ip_addresses).*.private_ip))
list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
network_id = module.kubernetes.network_id
}
)
}
resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${local.inventory}' > ${var.inventory_file}"
}
triggers = {
template = local.inventory
}
}

View File

@@ -0,0 +1,144 @@
resource "hcloud_network" "kubernetes" {
name = "${var.prefix}-network"
ip_range = var.private_network_cidr
}
resource "hcloud_network_subnet" "kubernetes" {
type = "cloud"
network_id = hcloud_network.kubernetes.id
network_zone = var.network_zone
ip_range = var.private_subnet_cidr
}
resource "hcloud_ssh_key" "first" {
name = var.prefix
public_key = var.ssh_public_keys.0
}
resource "hcloud_server" "machine" {
for_each = {
for name, machine in var.machines :
name => machine
}
name = "${var.prefix}-${each.key}"
ssh_keys = [hcloud_ssh_key.first.id]
# boot into rescue OS
rescue = "linux64"
# dummy value for the OS because Flatcar is not available
image = each.value.image
server_type = each.value.size
location = var.zone
connection {
host = self.ipv4_address
timeout = "5m"
private_key = file(var.ssh_private_key_path)
}
firewall_ids = each.value.node_type == "master" ? [hcloud_firewall.master.id] : [hcloud_firewall.worker.id]
provisioner "file" {
content = data.ct_config.machine-ignitions[each.key].rendered
destination = "/root/ignition.json"
}
provisioner "remote-exec" {
inline = [
"set -ex",
"apt update",
"apt install -y gawk",
"curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/flatcar/init/flatcar-master/bin/flatcar-install",
"chmod +x flatcar-install",
"./flatcar-install -s -i /root/ignition.json -C stable",
"shutdown -r +1",
]
}
# optional:
provisioner "remote-exec" {
connection {
host = self.ipv4_address
private_key = file(var.ssh_private_key_path)
timeout = "3m"
user = var.user_flatcar
}
inline = [
"sudo hostnamectl set-hostname ${self.name}",
]
}
}
resource "hcloud_server_network" "machine" {
for_each = {
for name, machine in var.machines :
name => hcloud_server.machine[name]
}
server_id = each.value.id
subnet_id = hcloud_network_subnet.kubernetes.id
}
data "ct_config" "machine-ignitions" {
for_each = {
for name, machine in var.machines :
name => machine
}
strict = false
content = templatefile(
"${path.module}/templates/machine.yaml.tmpl",
{
ssh_keys = jsonencode(var.ssh_public_keys)
user_flatcar = var.user_flatcar
name = each.key
}
)
}
resource "hcloud_firewall" "master" {
name = "${var.prefix}-master-firewall"
rule {
direction = "in"
protocol = "tcp"
port = "22"
source_ips = var.ssh_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "6443"
source_ips = var.api_server_whitelist
}
}
resource "hcloud_firewall" "worker" {
name = "${var.prefix}-worker-firewall"
rule {
direction = "in"
protocol = "tcp"
port = "22"
source_ips = var.ssh_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "80"
source_ips = var.ingress_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "443"
source_ips = var.ingress_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "30000-32767"
source_ips = var.nodeport_whitelist
}
}

View File

@@ -0,0 +1,29 @@
output "master_ip_addresses" {
value = {
for name, machine in var.machines :
name => {
"private_ip" = hcloud_server_network.machine[name].ip
"public_ip" = hcloud_server.machine[name].ipv4_address
}
if machine.node_type == "master"
}
}
output "worker_ip_addresses" {
value = {
for name, machine in var.machines :
name => {
"private_ip" = hcloud_server_network.machine[name].ip
"public_ip" = hcloud_server.machine[name].ipv4_address
}
if machine.node_type == "worker"
}
}
output "cluster_private_network_cidr" {
value = var.private_subnet_cidr
}
output "network_id" {
value = hcloud_network.kubernetes.id
}

View File

@@ -0,0 +1,19 @@
variant: flatcar
version: 1.0.0
passwd:
users:
- name: ${user_flatcar}
ssh_authorized_keys: ${ssh_keys}
storage:
files:
- path: /home/core/works
filesystem: root
mode: 0755
contents:
inline: |
#!/bin/bash
set -euo pipefail
hostname="$(hostname)"
echo My name is ${name} and the hostname is $${hostname}

View File

@@ -0,0 +1,60 @@
variable "zone" {
type = string
default = "fsn1"
}
variable "prefix" {
default = "k8s"
}
variable "user_flatcar" {
type = string
default = "core"
}
variable "machines" {
type = map(object({
node_type = string
size = string
image = string
}))
}
variable "ssh_public_keys" {
type = list(string)
}
variable "ssh_private_key_path" {
type = string
default = "~/.ssh/id_rsa"
}
variable "ssh_whitelist" {
type = list(string)
}
variable "api_server_whitelist" {
type = list(string)
}
variable "nodeport_whitelist" {
type = list(string)
}
variable "ingress_whitelist" {
type = list(string)
}
variable "private_network_cidr" {
default = "10.0.0.0/16"
}
variable "private_subnet_cidr" {
default = "10.0.10.0/24"
}
variable "network_zone" {
default = "eu-central"
}

View File

@@ -0,0 +1,14 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
}
ct = {
source = "poseidon/ct"
version = "0.11.0"
}
null = {
source = "hashicorp/null"
}
}
}

View File

@@ -0,0 +1,122 @@
resource "hcloud_network" "kubernetes" {
name = "${var.prefix}-network"
ip_range = var.private_network_cidr
}
resource "hcloud_network_subnet" "kubernetes" {
type = "cloud"
network_id = hcloud_network.kubernetes.id
network_zone = var.network_zone
ip_range = var.private_subnet_cidr
}
resource "hcloud_server" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}
name = "${var.prefix}-${each.key}"
image = each.value.image
server_type = each.value.size
location = var.zone
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
ssh_public_keys = var.ssh_public_keys
}
)
firewall_ids = [hcloud_firewall.master.id]
}
resource "hcloud_server_network" "master" {
for_each = hcloud_server.master
server_id = each.value.id
subnet_id = hcloud_network_subnet.kubernetes.id
}
resource "hcloud_server" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}
name = "${var.prefix}-${each.key}"
image = each.value.image
server_type = each.value.size
location = var.zone
user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
ssh_public_keys = var.ssh_public_keys
}
)
firewall_ids = [hcloud_firewall.worker.id]
}
resource "hcloud_server_network" "worker" {
for_each = hcloud_server.worker
server_id = each.value.id
subnet_id = hcloud_network_subnet.kubernetes.id
}
resource "hcloud_firewall" "master" {
name = "${var.prefix}-master-firewall"
rule {
direction = "in"
protocol = "tcp"
port = "22"
source_ips = var.ssh_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "6443"
source_ips = var.api_server_whitelist
}
}
resource "hcloud_firewall" "worker" {
name = "${var.prefix}-worker-firewall"
rule {
direction = "in"
protocol = "tcp"
port = "22"
source_ips = var.ssh_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "80"
source_ips = var.ingress_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "443"
source_ips = var.ingress_whitelist
}
rule {
direction = "in"
protocol = "tcp"
port = "30000-32767"
source_ips = var.nodeport_whitelist
}
}

View File

@@ -0,0 +1,27 @@
output "master_ip_addresses" {
value = {
for key, instance in hcloud_server.master :
instance.name => {
"private_ip" = hcloud_server_network.master[key].ip
"public_ip" = hcloud_server.master[key].ipv4_address
}
}
}
output "worker_ip_addresses" {
value = {
for key, instance in hcloud_server.worker :
instance.name => {
"private_ip" = hcloud_server_network.worker[key].ip
"public_ip" = hcloud_server.worker[key].ipv4_address
}
}
}
output "cluster_private_network_cidr" {
value = var.private_subnet_cidr
}
output "network_id" {
value = hcloud_network.kubernetes.id
}

View File

@@ -0,0 +1,16 @@
#cloud-config
users:
- default
- name: ubuntu
shell: /bin/bash
sudo: "ALL=(ALL) NOPASSWD:ALL"
ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
- ${ssh_public_key}
%{ endfor ~}
ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
- ${ssh_public_key}
%{ endfor ~}

View File

@@ -0,0 +1,44 @@
variable "zone" {
type = string
}
variable "prefix" {}
variable "machines" {
type = map(object({
node_type = string
size = string
image = string
}))
}
variable "ssh_public_keys" {
type = list(string)
}
variable "ssh_whitelist" {
type = list(string)
}
variable "api_server_whitelist" {
type = list(string)
}
variable "nodeport_whitelist" {
type = list(string)
}
variable "ingress_whitelist" {
type = list(string)
}
variable "private_network_cidr" {
default = "10.0.0.0/16"
}
variable "private_subnet_cidr" {
default = "10.0.10.0/24"
}
variable "network_zone" {
default = "eu-central"
}

View File

@@ -0,0 +1,9 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.38.2"
}
}
required_version = ">= 0.14"
}

View File

@@ -0,0 +1,7 @@
output "master_ips" {
value = module.kubernetes.master_ip_addresses
}
output "worker_ips" {
value = module.kubernetes.worker_ip_addresses
}

View File

@@ -0,0 +1,46 @@
prefix = "default"
zone = "hel1"
network_zone = "eu-central"
inventory_file = "inventory.ini"
ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]
ssh_private_key_path = "~/.ssh/id_rsa"
machines = {
"master-0" : {
"node_type" : "master",
"size" : "cx21",
"image" : "ubuntu-22.04",
},
"worker-0" : {
"node_type" : "worker",
"size" : "cx21",
"image" : "ubuntu-22.04",
},
"worker-1" : {
"node_type" : "worker",
"size" : "cx21",
"image" : "ubuntu-22.04",
}
}
nodeport_whitelist = [
"0.0.0.0/0"
]
ingress_whitelist = [
"0.0.0.0/0"
]
ssh_whitelist = [
"0.0.0.0/0"
]
api_server_whitelist = [
"0.0.0.0/0"
]

View File

@@ -0,0 +1 @@
../../../../inventory/sample/group_vars

View File

@@ -0,0 +1,19 @@
[all]
${connection_strings_master}
${connection_strings_worker}
[kube_control_plane]
${list_master}
[etcd]
${list_master}
[kube_node]
${list_worker}
[k8s_cluster:children]
kube_control_plane
kube_node
[k8s_cluster:vars]
network_id=${network_id}

View File

@@ -0,0 +1,56 @@
variable "zone" {
description = "The zone where to run the cluster"
}
variable "network_zone" {
description = "The network zone where the cluster is running"
default = "eu-central"
}
variable "prefix" {
description = "Prefix for resource names"
default = "default"
}
variable "machines" {
description = "Cluster machines"
type = map(object({
node_type = string
size = string
image = string
}))
}
variable "ssh_public_keys" {
description = "Public SSH key which are injected into the VMs."
type = list(string)
}
variable "ssh_private_key_path" {
description = "Private SSH key which connect to the VMs."
type = string
default = "~/.ssh/id_rsa"
}
variable "ssh_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for ssh"
type = list(string)
}
variable "api_server_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
type = list(string)
}
variable "nodeport_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
type = list(string)
}
variable "ingress_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for HTTP"
type = list(string)
}
variable "inventory_file" {
description = "Where to store the generated inventory file"
}

View File

@@ -0,0 +1,12 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.38.2"
}
null = {
source = "hashicorp/null"
}
}
required_version = ">= 0.14"
}

5
contrib/terraform/nifcloud/.gitignore vendored Normal file
View File

@@ -0,0 +1,5 @@
*.tfstate*
.terraform.lock.hcl
.terraform
sample-inventory/inventory.ini

View File

@@ -0,0 +1,137 @@
# Kubernetes on NIFCLOUD with Terraform
Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray
## Overview
The setup looks like the following:
```text
Kubernetes cluster
+----------------------------+
+---------------+ | +--------------------+ |
| | | | +--------------------+ |
| API server LB +---------> | | | |
| | | | | Control Plane/etcd | |
+---------------+ | | | node(s) | |
| +-+ | |
| +--------------------+ |
| ^ |
| | |
| v |
| +--------------------+ |
| | +--------------------+ |
| | | | |
| | | Worker | |
| | | node(s) | |
| +-+ | |
| +--------------------+ |
+----------------------------+
```
## Requirements
* Terraform 1.3.7 or newer
## Quickstart
### Export Variables
* Your NIFCLOUD credentials:
```bash
export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY>
export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY>
```
* The SSH KEY used to connect to the instance:
* FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm)
```bash
export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME>
```
* The IP address to connect to bastion server:
```bash
export TF_VAR_working_instance_ip=$(curl ifconfig.me)
```
### Create The Infrastructure
* Run terraform:
```bash
terraform init
terraform apply -var-file ./sample-inventory/cluster.tfvars
```
### Setup The Kubernetes
* Generate cluster configuration file:
```bash
./generate-inventory.sh > sample-inventory/inventory.ini
```
* Export Variables:
```bash
BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip')
API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb')
CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip')
export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\""
```
* Set up ssh-agent:
```bash
eval `ssh-agent`
ssh-add <THE PATH TO YOUR SSH KEY>
```
* Run cluster.yml playbook:
```bash
cd ./../../../
ansible-playbook -i contrib/terraform/nifcloud/inventory/inventory.ini cluster.yml
```
### Connecting to Kubernetes
* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost
* Fetching kubeconfig file:
```bash
mkdir -p ~/.kube
scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config
```
* Rewrite /etc/hosts
```bash
echo "${API_LB_IP} lb-apiserver.kubernetes.local" | sudo tee -a /etc/hosts
```
* Run kubectl
```bash
kubectl get node
```
## Variables
* `region`: Region where to run the cluster
* `az`: Availability zone where to run the cluster
* `private_ip_bn`: Private ip address of bastion server
* `private_network_cidr`: Subnet of private network
* `instances_cp`: Machines to provision as control plane nodes. The key of this object will be used as part of the machine's name
* `private_ip`: private ip address of machine
* `instances_wk`: Machines to provision as worker nodes. The key of this object will be used as part of the machine's name
* `private_ip`: private ip address of machine
* `instance_key_name`: The key name of the Key Pair to use for the instance
* `instance_type_bn`: The instance type of bastion server
* `instance_type_wk`: The instance type of worker node
* `instance_type_cp`: The instance type of control plane
* `image_name`: OS image used for the instance
* `working_instance_ip`: The IP address to connect to bastion server
* `accounting_type`: Accounting type. (1: monthly, 2: pay per use)
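As in the shipped `sample-inventory/cluster.tfvars`, `private_ip_bn` and the per-instance `private_ip` values are expected to fall inside `private_network_cidr`; a minimal sketch (the addresses and instance keys here are examples only):

```hcl
private_network_cidr = "192.168.30.0/24"
private_ip_bn        = "192.168.30.10/24"

instances_cp = {
  "cp01" : { private_ip : "192.168.30.11/24" }
}
instances_wk = {
  "wk01" : { private_ip : "192.168.30.21/24" }
}
```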

View File

@@ -0,0 +1,64 @@
#!/bin/bash
#
# Generates an inventory file based on the terraform output.
# After provisioning a cluster, simply run this command from the directory
# that holds the Terraform state (the script reads terraform output -json).
#
set -e
TF_OUT=$(terraform output -json)
CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}"))
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}"))
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}"))
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
echo "[all]"
# Generate control plane hosts
i=1
for name in "${CONTROL_PLANE_NAMES[@]}"; do
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}"))
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}"
i=$(( i + 1 ))
done
# Generate worker hosts
for name in "${WORKER_NAMES[@]}"; do
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}"
done
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}"))
echo ""
echo "[all:vars]"
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']"
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}"
echo ""
echo "[kube_control_plane]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
echo "${name}"
done
echo ""
echo "[etcd]"
for name in "${CONTROL_PLANE_NAMES[@]}"; do
echo "${name}"
done
echo ""
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
echo "${name}"
done
echo ""
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"

View File

@@ -0,0 +1,36 @@
provider "nifcloud" {
region = var.region
}
module "kubernetes_cluster" {
source = "./modules/kubernetes-cluster"
availability_zone = var.az
prefix = "dev"
private_network_cidr = var.private_network_cidr
instance_key_name = var.instance_key_name
instances_cp = var.instances_cp
instances_wk = var.instances_wk
image_name = var.image_name
instance_type_bn = var.instance_type_bn
instance_type_cp = var.instance_type_cp
instance_type_wk = var.instance_type_wk
private_ip_bn = var.private_ip_bn
additional_lb_filter = [var.working_instance_ip]
}
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
security_group_names = [
module.kubernetes_cluster.security_group_name.bastion
]
type = "IN"
from_port = 22
to_port = 22
protocol = "TCP"
cidr_ip = var.working_instance_ip
}

View File

@@ -0,0 +1,301 @@
#################################################
##
## Local variables
##
locals {
# e.g. east-11 is 11
az_num = reverse(split("-", var.availability_zone))[0]
# e.g. east-11 is e11
az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}"
# Port used by the protocol
port_ssh = 22
port_kubectl = 6443
port_kubelet = 10250
# calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements
port_bgp = 179
port_vxlan = 4789
port_etcd = 2379
}
#################################################
##
## General
##
# data
data "nifcloud_image" "this" {
image_name = var.image_name
}
# private lan
resource "nifcloud_private_lan" "this" {
private_lan_name = "${var.prefix}lan"
availability_zone = var.availability_zone
cidr_block = var.private_network_cidr
accounting_type = var.accounting_type
}
#################################################
##
## Bastion
##
resource "nifcloud_security_group" "bn" {
group_name = "${var.prefix}bn"
description = "${var.prefix} bastion"
availability_zone = var.availability_zone
}
resource "nifcloud_instance" "bn" {
instance_id = "${local.az_short_name}${var.prefix}bn01"
security_group = nifcloud_security_group.bn.group_name
instance_type = var.instance_type_bn
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
private_ip_address = var.private_ip_bn
ssh_port = local.port_ssh
hostname = "${local.az_short_name}${var.prefix}bn01"
})
availability_zone = var.availability_zone
accounting_type = var.accounting_type
image_id = data.nifcloud_image.this.image_id
key_name = var.instance_key_name
network_interface {
network_id = "net-COMMON_GLOBAL"
}
network_interface {
network_id = nifcloud_private_lan.this.network_id
ip_address = "static"
}
# The image_id changes when the OS image type is demoted from standard to public.
lifecycle {
ignore_changes = [
image_id,
user_data,
]
}
}
#################################################
##
## Control Plane
##
resource "nifcloud_security_group" "cp" {
group_name = "${var.prefix}cp"
description = "${var.prefix} control plane"
availability_zone = var.availability_zone
}
resource "nifcloud_instance" "cp" {
for_each = var.instances_cp
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
security_group = nifcloud_security_group.cp.group_name
instance_type = var.instance_type_cp
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
private_ip_address = each.value.private_ip
ssh_port = local.port_ssh
hostname = "${local.az_short_name}${var.prefix}${each.key}"
})
availability_zone = var.availability_zone
accounting_type = var.accounting_type
image_id = data.nifcloud_image.this.image_id
key_name = var.instance_key_name
network_interface {
network_id = "net-COMMON_GLOBAL"
}
network_interface {
network_id = nifcloud_private_lan.this.network_id
ip_address = "static"
}
# The image_id changes when the OS image type is demoted from standard to public.
lifecycle {
ignore_changes = [
image_id,
user_data,
]
}
}
resource "nifcloud_load_balancer" "this" {
load_balancer_name = "${local.az_short_name}${var.prefix}cp"
accounting_type = var.accounting_type
balancing_type = 1 // Round-Robin
load_balancer_port = local.port_kubectl
instance_port = local.port_kubectl
instances = [for v in nifcloud_instance.cp : v.instance_id]
filter = concat(
[for k, v in nifcloud_instance.cp : v.public_ip],
[for k, v in nifcloud_instance.wk : v.public_ip],
var.additional_lb_filter,
)
filter_type = 1 // Allow
}
#################################################
##
## Worker
##
resource "nifcloud_security_group" "wk" {
group_name = "${var.prefix}wk"
description = "${var.prefix} worker"
availability_zone = var.availability_zone
}
resource "nifcloud_instance" "wk" {
for_each = var.instances_wk
instance_id = "${local.az_short_name}${var.prefix}${each.key}"
security_group = nifcloud_security_group.wk.group_name
instance_type = var.instance_type_wk
user_data = templatefile("${path.module}/templates/userdata.tftpl", {
private_ip_address = each.value.private_ip
ssh_port = local.port_ssh
hostname = "${local.az_short_name}${var.prefix}${each.key}"
})
availability_zone = var.availability_zone
accounting_type = var.accounting_type
image_id = data.nifcloud_image.this.image_id
key_name = var.instance_key_name
network_interface {
network_id = "net-COMMON_GLOBAL"
}
network_interface {
network_id = nifcloud_private_lan.this.network_id
ip_address = "static"
}
# The image_id changes when the OS image type is demoted from standard to public.
lifecycle {
ignore_changes = [
image_id,
user_data,
]
}
}
#################################################
##
## Security Group Rule: Kubernetes
##
# ssh
resource "nifcloud_security_group_rule" "ssh_from_bastion" {
security_group_names = [
nifcloud_security_group.wk.group_name,
nifcloud_security_group.cp.group_name,
]
type = "IN"
from_port = local.port_ssh
to_port = local.port_ssh
protocol = "TCP"
source_security_group_name = nifcloud_security_group.bn.group_name
}
# kubectl
resource "nifcloud_security_group_rule" "kubectl_from_worker" {
security_group_names = [
nifcloud_security_group.cp.group_name,
]
type = "IN"
from_port = local.port_kubectl
to_port = local.port_kubectl
protocol = "TCP"
source_security_group_name = nifcloud_security_group.wk.group_name
}
# kubelet
resource "nifcloud_security_group_rule" "kubelet_from_worker" {
security_group_names = [
nifcloud_security_group.cp.group_name,
]
type = "IN"
from_port = local.port_kubelet
to_port = local.port_kubelet
protocol = "TCP"
source_security_group_name = nifcloud_security_group.wk.group_name
}
resource "nifcloud_security_group_rule" "kubelet_from_control_plane" {
security_group_names = [
nifcloud_security_group.wk.group_name,
]
type = "IN"
from_port = local.port_kubelet
to_port = local.port_kubelet
protocol = "TCP"
source_security_group_name = nifcloud_security_group.cp.group_name
}
#################################################
##
## Security Group Rule: calico
##
# vxlan
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" {
security_group_names = [
nifcloud_security_group.wk.group_name,
]
type = "IN"
from_port = local.port_vxlan
to_port = local.port_vxlan
protocol = "UDP"
source_security_group_name = nifcloud_security_group.cp.group_name
}
resource "nifcloud_security_group_rule" "vxlan_from_worker" {
security_group_names = [
nifcloud_security_group.cp.group_name,
]
type = "IN"
from_port = local.port_vxlan
to_port = local.port_vxlan
protocol = "UDP"
source_security_group_name = nifcloud_security_group.wk.group_name
}
# bgp
resource "nifcloud_security_group_rule" "bgp_from_control_plane" {
security_group_names = [
nifcloud_security_group.wk.group_name,
]
type = "IN"
from_port = local.port_bgp
to_port = local.port_bgp
protocol = "TCP"
source_security_group_name = nifcloud_security_group.cp.group_name
}
resource "nifcloud_security_group_rule" "bgp_from_worker" {
security_group_names = [
nifcloud_security_group.cp.group_name,
]
type = "IN"
from_port = local.port_bgp
to_port = local.port_bgp
protocol = "TCP"
source_security_group_name = nifcloud_security_group.wk.group_name
}
# etcd
resource "nifcloud_security_group_rule" "etcd_from_worker" {
security_group_names = [
nifcloud_security_group.cp.group_name,
]
type = "IN"
from_port = local.port_etcd
to_port = local.port_etcd
protocol = "TCP"
source_security_group_name = nifcloud_security_group.wk.group_name
}

View File

@@ -0,0 +1,48 @@
output "control_plane_lb" {
description = "The DNS name of LB for control plane"
value = nifcloud_load_balancer.this.dns_name
}
output "security_group_name" {
description = "The security group used in the cluster"
value = {
bastion = nifcloud_security_group.bn.group_name,
control_plane = nifcloud_security_group.cp.group_name,
worker = nifcloud_security_group.wk.group_name,
}
}
output "private_network_id" {
description = "The private network used in the cluster"
value = nifcloud_private_lan.this.id
}
output "bastion_info" {
description = "The basion information in cluster"
value = { (nifcloud_instance.bn.instance_id) : {
instance_id = nifcloud_instance.bn.instance_id,
unique_id = nifcloud_instance.bn.unique_id,
private_ip = nifcloud_instance.bn.private_ip,
public_ip = nifcloud_instance.bn.public_ip,
} }
}
output "worker_info" {
description = "The worker information in cluster"
value = { for v in nifcloud_instance.wk : v.instance_id => {
instance_id = v.instance_id,
unique_id = v.unique_id,
private_ip = v.private_ip,
public_ip = v.public_ip,
} }
}
output "control_plane_info" {
description = "The control plane information in cluster"
value = { for v in nifcloud_instance.cp : v.instance_id => {
instance_id = v.instance_id,
unique_id = v.unique_id,
private_ip = v.private_ip,
public_ip = v.public_ip,
} }
}

View File

@@ -0,0 +1,45 @@
#!/bin/bash
#################################################
##
## IP Address
##
configure_private_ip_address () {
cat << EOS > /etc/netplan/01-netcfg.yaml
network:
version: 2
renderer: networkd
ethernets:
ens192:
dhcp4: yes
dhcp6: yes
dhcp-identifier: mac
ens224:
dhcp4: no
dhcp6: no
addresses: [${private_ip_address}]
EOS
netplan apply
}
configure_private_ip_address
#################################################
##
## SSH
##
configure_ssh_port () {
sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config
}
configure_ssh_port
#################################################
##
## Hostname
##
hostnamectl set-hostname ${hostname}
#################################################
##
## Disable swap files generated by systemd-gpt-auto-generator
##
systemctl mask "dev-sda3.swap"

View File

@@ -0,0 +1,9 @@
terraform {
required_version = ">=1.3.7"
required_providers {
nifcloud = {
source = "nifcloud/nifcloud"
version = ">= 1.8.0, < 2.0.0"
}
}
}

View File

@@ -0,0 +1,81 @@
variable "availability_zone" {
description = "The availability zone"
type = string
}
variable "prefix" {
description = "The prefix for the entire cluster"
type = string
validation {
condition = length(var.prefix) <= 5
error_message = "Must be a less than 5 character long."
}
}
variable "private_network_cidr" {
description = "The subnet of private network"
type = string
validation {
condition = can(cidrnetmask(var.private_network_cidr))
error_message = "Must be a valid IPv4 CIDR block address."
}
}
variable "private_ip_bn" {
description = "Private IP of bastion server"
type = string
}
variable "instances_cp" {
type = map(object({
private_ip = string
}))
}
variable "instances_wk" {
type = map(object({
private_ip = string
}))
}
variable "instance_key_name" {
description = "The key name of the Key Pair to use for the instance"
type = string
}
variable "instance_type_bn" {
description = "The instance type of bastion server"
type = string
}
variable "instance_type_wk" {
description = "The instance type of worker"
type = string
}
variable "instance_type_cp" {
description = "The instance type of control plane"
type = string
}
variable "image_name" {
description = "The name of image"
type = string
}
variable "additional_lb_filter" {
description = "Additional LB filter"
type = list(string)
}
variable "accounting_type" {
type = string
default = "1"
validation {
condition = anytrue([
var.accounting_type == "1", // Monthly
var.accounting_type == "2", // Pay per use
])
error_message = "Must be a 1 or 2."
}
}

View File

@@ -0,0 +1,3 @@
output "kubernetes_cluster" {
value = module.kubernetes_cluster
}

View File

@@ -0,0 +1,22 @@
region = "jp-west-1"
az = "west-11"
instance_key_name = "deployerkey"
instance_type_bn = "e-medium"
instance_type_cp = "e-medium"
instance_type_wk = "e-medium"
private_network_cidr = "192.168.30.0/24"
instances_cp = {
"cp01" : { private_ip : "192.168.30.11/24" }
"cp02" : { private_ip : "192.168.30.12/24" }
"cp03" : { private_ip : "192.168.30.13/24" }
}
instances_wk = {
"wk01" : { private_ip : "192.168.30.21/24" }
"wk02" : { private_ip : "192.168.30.22/24" }
}
private_ip_bn = "192.168.30.10/24"
image_name = "Ubuntu Server 22.04 LTS"

View File

@@ -0,0 +1 @@
../../../../inventory/sample/group_vars

View File

@@ -0,0 +1,9 @@
terraform {
required_version = ">=1.3.7"
required_providers {
nifcloud = {
source = "nifcloud/nifcloud"
version = "1.8.0"
}
}
}

View File

@@ -0,0 +1,77 @@
variable "region" {
description = "The region"
type = string
}
variable "az" {
description = "The availability zone"
type = string
}
variable "private_ip_bn" {
description = "Private IP of bastion server"
type = string
}
variable "private_network_cidr" {
description = "The subnet of private network"
type = string
validation {
condition = can(cidrnetmask(var.private_network_cidr))
error_message = "Must be a valid IPv4 CIDR block address."
}
}
variable "instances_cp" {
type = map(object({
private_ip = string
}))
}
variable "instances_wk" {
type = map(object({
private_ip = string
}))
}
variable "instance_key_name" {
description = "The key name of the Key Pair to use for the instance"
type = string
}
variable "instance_type_bn" {
description = "The instance type of bastion server"
type = string
}
variable "instance_type_wk" {
description = "The instance type of worker"
type = string
}
variable "instance_type_cp" {
description = "The instance type of control plane"
type = string
}
variable "image_name" {
description = "The name of image"
type = string
}
variable "working_instance_ip" {
description = "The IP address to connect to bastion server."
type = string
}
variable "accounting_type" {
type = string
default = "2"
validation {
condition = anytrue([
var.accounting_type == "1", // Monthly
var.accounting_type == "2", // Pay per use
])
error_message = "Must be a 1 or 2."
}
}

View File

@@ -0,0 +1,5 @@
.terraform
*.tfvars
!sample-inventory/cluster.tfvars
*.tfstate
*.tfstate.backup

View File

@@ -0,0 +1,786 @@
# Kubernetes on OpenStack with Terraform
Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
OpenStack.
## Status
This will install a Kubernetes cluster on an OpenStack Cloud. It should work on
most modern installs of OpenStack that support the basic services.
### Known compatible public clouds
- [Auro](https://auro.io/)
- [Betacloud](https://www.betacloud.io/)
- [CityCloud](https://www.citycloud.com/)
- [DreamHost](https://www.dreamhost.com/cloud/computing/)
- [ELASTX](https://elastx.se/)
- [EnterCloudSuite](https://www.entercloudsuite.com/)
- [FugaCloud](https://fuga.cloud/)
- [Open Telekom Cloud](https://cloud.telekom.de/)
- [OVH](https://www.ovh.com/)
- [Rackspace](https://www.rackspace.com/)
- [Safespring](https://www.safespring.com)
- [Ultimum](https://ultimum.io/)
- [VexxHost](https://vexxhost.com/)
- [Zetta](https://www.zetta.io/)
## Approach
The terraform configuration inspects variables found in
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
There is a [python script](../terraform.py) that reads the generated `.tfstate`
file to generate a dynamic inventory that is consumed by the main ansible script
to actually install kubernetes and stand up the cluster.
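Once the resources exist, you can sanity-check the generated inventory directly. A minimal sketch, assuming you have set up the inventory directory described under "Inventory files" below and that the script accepts the standard dynamic-inventory `--list` flag:
```ShellSession
cd inventory/$CLUSTER
# print the inventory Ansible will consume, built from the .tfstate
./hosts --list
```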
### Networking
The configuration includes creating a private subnet with a router to the
external net. It will allocate floating IPs from a pool and assign them to the
hosts where that makes sense. You have the option of creating bastion hosts
inside the private subnet to access the nodes there. Alternatively, a node with
a floating IP can be used as a jump host to nodes without.
#### Using an existing router
It is possible to use an existing router instead of creating one. To use an
existing router set the router\_id variable to the uuid of the router you wish
to use.
For example:
```ShellSession
router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3"
```
### Kubernetes Nodes
You can create many different kubernetes topologies by setting the number of
different classes of hosts. For each class there are options for allocating
floating IP addresses or not.
- Master nodes with etcd
- Master nodes without etcd
- Standalone etcd hosts
- Kubernetes worker nodes
Note that the Ansible script will report an invalid configuration if you wind up
with an even number of etcd instances since that is not a valid configuration. This
restriction includes standalone etcd nodes that are deployed in a cluster along with
master nodes with etcd replicas. As an example, if you have three master nodes with
etcd replicas and three standalone etcd nodes, the script will fail since there are
now six total etcd replicas.
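For instance, a small topology that satisfies the odd-etcd requirement is three masters with etcd replicas and two workers. A minimal sketch of the relevant counts in `cluster.tfvars` (the variables are documented in the table further down):
```ini
number_of_k8s_masters = 3   # masters with colocated etcd (odd count)
number_of_etcd        = 0   # no standalone etcd nodes
number_of_k8s_nodes   = 2
```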
### GlusterFS shared file system
The Terraform configuration supports provisioning of an optional GlusterFS
shared file system based on a separate set of VMs. To enable this, you need to
specify:
- the number of Gluster hosts (minimum 2)
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
- Other properties related to provisioning the hosts
Even if you are using Flatcar Container Linux by Kinvolk for your cluster, you will still
need the GlusterFS VMs to be based on either Debian or RedHat based images.
Flatcar Container Linux by Kinvolk cannot serve GlusterFS, but can connect to it through
binaries available on hyperkube v1.4.3_coreos.0 or higher.
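A minimal sketch of the GlusterFS-related entries in `cluster.tfvars` (all values are placeholders; the variables are described in the cluster variables table below):
```ini
number_of_gfs_nodes_no_floating_ip = 2
gfs_volume_size_in_gb              = 50
image_gfs                          = "Debian-11"   # placeholder Glance image name
ssh_user_gfs                       = "debian"
```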
## Requirements
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.14 or later
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
- you already have a suitable OS image in Glance
- you already have a floating IP pool created
- you have security groups enabled
- you have a pair of keys generated that can be used to secure the new hosts
## Module Architecture
The configuration is divided into three modules:
- Network
- IPs
- Compute
The main reason for splitting the configuration up in this way is to easily
accommodate situations where floating IPs are limited by a quota or if you have
any external references to the floating IP (e.g. DNS) that would otherwise have
to be updated.
You can force your existing IPs by modifying the compute variables in
`kubespray.tf` as follows:
```ini
k8s_master_fips = ["151.101.129.67"]
k8s_node_fips = ["151.101.129.68"]
```
## Terraform
Terraform will be used to provision all of the OpenStack resources with base software as appropriate.
### Configuration
#### Inventory files
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
```ShellSession
cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
cd inventory/$CLUSTER
ln -s ../../contrib/terraform/openstack/hosts
ln -s ../../contrib
```
This will be the base for subsequent Terraform commands.
#### OpenStack access and credentials
No provider variables are hardcoded inside `variables.tf` because Terraform
supports various authentication methods for OpenStack: the older script and
environment method (using `openrc`) as well as a newer declarative method, and
different OpenStack environments may support Identity API version 2 or 3.
These are examples and may vary depending on your OpenStack cloud provider,
for an exhaustive list on how to authenticate on OpenStack with Terraform
please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/).
##### Declarative method (recommended)
The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in:
- the current directory
- `~/.config/openstack`
- `/etc/openstack`
`clouds.yaml`:
```yaml
clouds:
mycloud:
auth:
auth_url: https://openstack:5000/v3
username: "username"
project_name: "projectname"
project_id: projectid
user_domain_name: "Default"
password: "password"
region_name: "RegionOne"
interface: "public"
identity_api_version: 3
```
If you have multiple clouds defined in your `clouds.yaml` file you can choose
the one you want to use with the environment variable `OS_CLOUD`:
```ShellSession
export OS_CLOUD=mycloud
```
##### Openrc method
When using classic environment variables, Terraform uses default `OS_*`
environment variables. A script suitable for your environment may be available
from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*.
With identity v2:
```ShellSession
source openrc
env | grep OS
OS_AUTH_URL=https://openstack:5000/v2.0
OS_PROJECT_ID=projectid
OS_PROJECT_NAME=projectname
OS_USERNAME=username
OS_PASSWORD=password
OS_REGION_NAME=RegionOne
OS_INTERFACE=public
OS_IDENTITY_API_VERSION=2
```
With identity v3:
```ShellSession
source openrc
env | grep OS
OS_AUTH_URL=https://openstack:5000/v3
OS_PROJECT_ID=projectid
OS_PROJECT_NAME=username
OS_PROJECT_DOMAIN_ID=default
OS_USERNAME=username
OS_PASSWORD=password
OS_REGION_NAME=RegionOne
OS_INTERFACE=public
OS_IDENTITY_API_VERSION=3
OS_USER_DOMAIN_NAME=Default
```
Terraform does not support a mix of DomainName and DomainID, choose one or the other:
- provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
```ShellSession
unset OS_USER_DOMAIN_NAME
export OS_USER_DOMAIN_ID=default
```
or
```ShellSession
unset OS_PROJECT_DOMAIN_ID
export OS_PROJECT_DOMAIN_NAME=Default
```
#### Cluster variables
The construction of the cluster is driven by values found in
[variables.tf](variables.tf).
For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|Variable | Description |
|---------|-------------|
|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example, the first compute resource will be named `example-kubernetes-1`. |
|`az_list` | List of Availability Zones available in your OpenStack cluster. |
|`network_name` | The name to be given to the internal network that will be generated |
|`use_existing_network`| Use an existing network with the name of `network_name`. `false` by default |
|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. |
|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to bastion node instead of creating new random floating IPs. |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses|
|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses |
|`number_of_etcd` | Number of pure etcd nodes |
|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating ip addresses. |
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. |
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
|`node_volume_type` | Volume type of the root volume for nodes, 'Default' by default |
|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|`additional_server_groups` | Extra server groups to create. Set "policy" to the policy for the group, expected format is `{"new-server-group" = {"policy" = "anti-affinity"}}`, default: {} (to not create any extra groups) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default |
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
|`k8s_nodes` | Map containing worker node definition, see explanation below |
|`k8s_masters` | Map containing master node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` |
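Putting a few of these variables together, a minimal `inventory/$CLUSTER/cluster.tfvars` might look like the sketch below. All names, UUIDs and flavor IDs are placeholders; consult `sample-inventory/cluster.tfvars` for the full, authoritative sample:
```ini
cluster_name    = "example"
az_list         = ["nova"]
network_name    = "example-network"
external_net    = "00000000-0000-0000-0000-000000000000"   # UUID of your external network
floatingip_pool = "public"
dns_nameservers = ["1.1.1.1", "8.8.8.8"]

public_key_path = "~/.ssh/id_rsa.pub"
image           = "Ubuntu-22.04"   # Glance image name (placeholder)
ssh_user        = "ubuntu"

number_of_k8s_masters = 1
number_of_k8s_nodes   = 2
flavor_k8s_master     = "<flavor-id>"
flavor_k8s_node       = "<flavor-id>"
```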
##### k8s_nodes
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and availability zone placement.
To enable the use of this mode set the `number_of_k8s_nodes` and `number_of_k8s_nodes_no_floating_ip` variables to 0.
Then define your desired worker node configuration using the `k8s_nodes` variable.
The `az`, `flavor` and `floating_ip` parameters are mandatory.
The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.
```yaml
k8s_nodes:
node-name:
az: string # Name of the AZ
flavor: string # Flavor ID to use
floating_ip: bool # If floating IPs should be created or not
extra_groups: string # (optional) Additional groups to add for kubespray, defaults to no groups
image_id: string # (optional) Image ID to use, defaults to var.image_id or var.image
root_volume_size_in_gb: number # (optional) Size of the block storage to use as root disk, defaults to var.node_root_volume_size_in_gb or to use volume from flavor otherwise
volume_type: string # (optional) Volume type to use, defaults to var.node_volume_type
network_id: string # (optional) Use this network_id for the node, defaults to either var.network_id or ID of var.network_name
server_group: string # (optional) Server group to add this node to. If set, this has to be one specified in additional_server_groups, defaults to use the server group specified in node_server_group_policy
cloudinit: # (optional) Options for cloud-init
extra_partitions: # List of extra partitions (other than the root partition) to setup during creation
volume_path: string # Path to the volume to create partition for (e.g. /dev/vda )
partition_path: string # Path to the partition (e.g. /dev/vda2 )
mount_path: string # Path to where the partition should be mounted
partition_start: string # Where the partition should start (e.g. 10GB ). Note, if you set the partition_start to 0 there will be no space left for the root partition
partition_end: string # Where the partition should end (e.g. 10GB or -1 for end of volume)
netplan_critical_dhcp_interface: string # Name of interface to set the dhcp flag critical = true, to circumvent [this issue](https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1776013).
```
For example:
```ini
k8s_nodes = {
"1" = {
"az" = "sto1"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
},
"2" = {
"az" = "sto2"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
},
"3" = {
"az" = "sto3"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
"extra_groups" = "calico_rr"
}
}
```
Would result in the same configuration as:
```ini
number_of_k8s_nodes = 3
flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445"
az_list = ["sto1", "sto2", "sto3"]
```
And:
```ini
k8s_nodes = {
"ing-1" = {
"az" = "sto1"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
},
"ing-2" = {
"az" = "sto2"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
},
"ing-3" = {
"az" = "sto3"
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
"floating_ip" = true
},
"big-1" = {
"az" = "sto1"
"flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
"floating_ip" = false
},
"big-2" = {
"az" = "sto2"
"flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
"floating_ip" = false
},
"big-3" = {
"az" = "sto3"
"flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
"floating_ip" = false
},
"small-1" = {
"az" = "sto1"
"flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
"floating_ip" = false
},
"small-2" = {
"az" = "sto2"
"flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
"floating_ip" = false
},
"small-3" = {
"az" = "sto3"
"flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
"floating_ip" = false
}
}
```
Would result in three nodes in each availability zone, each with their own separate naming,
flavor and floating IP configuration.
The "schema":
```ini
k8s_nodes = {
"key | node name suffix, must be unique" = {
"az" = string
"flavor" = string
"floating_ip" = bool
},
}
```
All values are required.
#### Terraform state files
In the cluster's inventory folder, the following files might be created (either by Terraform
or manually). To prevent you from pushing them accidentally, they are listed in the
`.gitignore` file in the `terraform/openstack` directory:
- `.terraform`
- `.tfvars`
- `.tfstate`
- `.tfstate.backup`
You can still add them manually if you want to.
### Initialization
Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:
```ShellSession
cd inventory/$CLUSTER
terraform -chdir="../../contrib/terraform/openstack" init
```
This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
### Customizing with cloud-init
You can apply cloud-init based customization for the openstack instances before provisioning your cluster.
One common template is used for all instances. Adjust the file shown below:
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl`
For example, to enable openstack novnc access and ansible_user=root SSH access:
```yaml
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
ssh_pwauth: yes
chpasswd:
list: |
root:secret
expire: False
## in some cases direct root ssh access via ssh key is required
disable_root: false
```
### Provisioning cluster
You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession
terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
```
If you chose to create a bastion host, this script will create
`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to
be able to access your machines tunneling through the bastion's IP address. If
you want to manually handle the ssh tunneling to these machines, please delete
or move that file. If you want to use this, just leave it there, as ansible will
pick it up automatically.
### Destroying cluster
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession
terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars
```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
- remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
- clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
### Debugging
You can enable debugging output from Terraform by setting
`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the Terraform command.
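For example, from your inventory directory (a sketch of the same `apply` command as above, with debugging enabled):
```ShellSession
export OS_DEBUG=1
export TF_LOG=DEBUG
terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
```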
### Terraform output
Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:
- `private_subnet_id`: the subnet where your instances are running is used for `openstack_lbaas_subnet_id`
- `floating_network_id`: the network_id where the floating IP are provisioned is used for `openstack_lbaas_floating_network_id`
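The values can be read back after provisioning, for example:
```ShellSession
terraform -chdir="../../contrib/terraform/openstack" output private_subnet_id
terraform -chdir="../../contrib/terraform/openstack" output floating_network_id
```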
## Ansible
### Node access
#### SSH
Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:
```ShellSession
eval $(ssh-agent -s)
ssh-add ~/.ssh/id_rsa
```
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
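For example, stale entries can be removed one host at a time with `ssh-keygen -R` (the address below is a placeholder):
```ShellSession
ssh-keygen -R <old-floating-ip>
```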
#### Metadata variables
The [python script](../terraform.py) that reads the
generated `.tfstate` file to generate a dynamic inventory recognizes
some variables within a "metadata" block, defined in a "resource"
block (example):
```ini
resource "openstack_compute_instance_v2" "example" {
...
metadata {
ssh_user = "ubuntu"
prefer_ipv6 = true
python_bin = "/usr/bin/python3"
}
...
}
```
As the example shows, these let you define the SSH username for
Ansible, a Python binary which is needed by Ansible if
`/usr/bin/python` doesn't exist, and whether the IPv6 address of the
instance should be preferred over IPv4.
#### Bastion host
Bastion access will be determined by:
- Your choice on the amount of bastion hosts (set by `number_of_bastions` terraform variable).
- The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).
If you have a bastion host, your ssh traffic will be routed directly through it, regardless of whether you have masters/nodes with a floating IP assigned.
If you don't have a bastion host but at least one of your masters/nodes has a floating IP, then ssh traffic will be tunneled through one of these machines.
So either a bastion host or at least one master/node with a floating IP is required.
#### Test access
Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
```ShellSession
$ ansible -i inventory/$CLUSTER/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
"changed": false,
"ping": "pong"
}
example-etcd-1 | SUCCESS => {
"changed": false,
"ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
"changed": false,
"ping": "pong"
}
```
If it fails try to connect manually via SSH. It could be something as simple as a stale host key.
### Configure cluster variables
Edit `inventory/$CLUSTER/group_vars/all/all.yml`:
- **bin_dir**:
```yml
# Directory where the binaries will be installed
# Default:
# bin_dir: /usr/local/bin
# For Flatcar Container Linux by Kinvolk:
bin_dir: /opt/bin
```
- and **cloud_provider**:
```yml
cloud_provider: openstack
```
Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:
- Set variable **kube_network_plugin** to your desired networking plugin.
- **flannel** works out-of-the-box
- **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
```yml
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
```
- Set variable **resolvconf_mode**
```yml
# Can be docker_dns, host_resolvconf or none
# Default:
# resolvconf_mode: docker_dns
# For Flatcar Container Linux by Kinvolk:
resolvconf_mode: host_resolvconf
```
- Set the maximum number of attached Cinder volumes per host (default 256)
```yml
node_volume_attach_limit: 26
```
### Deploy Kubernetes
```ShellSession
ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```
This will take some time as there are many tasks to run.
## Kubernetes
### Set up kubectl
1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
2. Add a route to the internal IP of a master node (if needed):
```ShellSession
sudo route add [master-internal-ip] gw [router-ip]
```
or
```ShellSession
sudo route add -net [internal-subnet]/24 gw [router-ip]
```
3. List Kubernetes certificates & keys:
```ShellSession
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
```
4. Get `admin`'s certificates and keys:
```ShellSession
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
```
5. Configure kubectl:
```ShellSession
$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
--certificate-authority=ca.pem
$ kubectl config set-credentials default-admin \
--certificate-authority=ca.pem \
--client-key=admin-key.pem \
--client-certificate=admin.pem
$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
$ kubectl config use-context default-system
```
6. Check it:
```ShellSession
kubectl version
```
## GlusterFS
GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
for instructions.
Basically you will install GlusterFS as follows:
```ShellSession
ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```
## What's next
Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
## Appendix
### Migration from `number_of_k8s_nodes*` to `k8s_nodes`
If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish
to migrate to the `k8s_nodes` style you can do it like so:
```ShellSession
$ terraform state list
module.compute.data.openstack_images_image_v2.gfs_image
module.compute.data.openstack_images_image_v2.vm_image
module.compute.openstack_compute_floatingip_associate_v2.k8s_master[0]
module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]
module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]
module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]
module.compute.openstack_compute_instance_v2.k8s_master[0]
module.compute.openstack_compute_instance_v2.k8s_node[0]
module.compute.openstack_compute_instance_v2.k8s_node[1]
module.compute.openstack_compute_instance_v2.k8s_node[2]
module.compute.openstack_compute_keypair_v2.k8s
module.compute.openstack_compute_servergroup_v2.k8s_etcd[0]
module.compute.openstack_compute_servergroup_v2.k8s_master[0]
module.compute.openstack_compute_servergroup_v2.k8s_node[0]
module.compute.openstack_networking_secgroup_rule_v2.bastion[0]
module.compute.openstack_networking_secgroup_rule_v2.egress[0]
module.compute.openstack_networking_secgroup_rule_v2.k8s
module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[0]
module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[1]
module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[2]
module.compute.openstack_networking_secgroup_rule_v2.k8s_master[0]
module.compute.openstack_networking_secgroup_rule_v2.worker[0]
module.compute.openstack_networking_secgroup_rule_v2.worker[1]
module.compute.openstack_networking_secgroup_rule_v2.worker[2]
module.compute.openstack_networking_secgroup_rule_v2.worker[3]
module.compute.openstack_networking_secgroup_rule_v2.worker[4]
module.compute.openstack_networking_secgroup_v2.bastion[0]
module.compute.openstack_networking_secgroup_v2.k8s
module.compute.openstack_networking_secgroup_v2.k8s_master
module.compute.openstack_networking_secgroup_v2.worker
module.ips.null_resource.dummy_dependency
module.ips.openstack_networking_floatingip_v2.k8s_master[0]
module.ips.openstack_networking_floatingip_v2.k8s_node[0]
module.ips.openstack_networking_floatingip_v2.k8s_node[1]
module.ips.openstack_networking_floatingip_v2.k8s_node[2]
module.network.openstack_networking_network_v2.k8s[0]
module.network.openstack_networking_router_interface_v2.k8s[0]
module.network.openstack_networking_router_v2.k8s[0]
module.network.openstack_networking_subnet_v2.k8s[0]
$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["1"]'
Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"1\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["2"]'
Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"2\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["3"]'
Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"3\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[0]' 'module.compute.openstack_compute_instance_v2.k8s_node["1"]'
Move "module.compute.openstack_compute_instance_v2.k8s_node[0]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"1\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[1]' 'module.compute.openstack_compute_instance_v2.k8s_node["2"]'
Move "module.compute.openstack_compute_instance_v2.k8s_node[1]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"2\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[2]' 'module.compute.openstack_compute_instance_v2.k8s_node["3"]'
Move "module.compute.openstack_compute_instance_v2.k8s_node[2]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"3\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[0]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["1"]'
Move "module.ips.openstack_networking_floatingip_v2.k8s_node[0]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"1\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[1]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["2"]'
Move "module.ips.openstack_networking_floatingip_v2.k8s_node[1]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"2\"]"
Successfully moved 1 object(s).
$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[2]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["3"]'
Move "module.ips.openstack_networking_floatingip_v2.k8s_node[2]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"3\"]"
Successfully moved 1 object(s).
```
Of course, for nodes without floating IPs those steps can be omitted.

View File

@@ -0,0 +1 @@
../terraform.py

View File

@@ -0,0 +1,130 @@
module "network" {
source = "./modules/network"
external_net = var.external_net
network_name = var.network_name
subnet_cidr = var.subnet_cidr
cluster_name = var.cluster_name
dns_nameservers = var.dns_nameservers
network_dns_domain = var.network_dns_domain
use_neutron = var.use_neutron
port_security_enabled = var.port_security_enabled
router_id = var.router_id
}
module "ips" {
source = "./modules/ips"
number_of_k8s_masters = var.number_of_k8s_masters
number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd
number_of_k8s_nodes = var.number_of_k8s_nodes
floatingip_pool = var.floatingip_pool
number_of_bastions = var.number_of_bastions
external_net = var.external_net
network_name = var.network_name
router_id = module.network.router_id
k8s_nodes = var.k8s_nodes
k8s_masters = var.k8s_masters
k8s_master_fips = var.k8s_master_fips
bastion_fips = var.bastion_fips
router_internal_port_id = module.network.router_internal_port_id
}
module "compute" {
source = "./modules/compute"
cluster_name = var.cluster_name
az_list = var.az_list
az_list_node = var.az_list_node
number_of_k8s_masters = var.number_of_k8s_masters
number_of_k8s_masters_no_etcd = var.number_of_k8s_masters_no_etcd
number_of_etcd = var.number_of_etcd
number_of_k8s_masters_no_floating_ip = var.number_of_k8s_masters_no_floating_ip
number_of_k8s_masters_no_floating_ip_no_etcd = var.number_of_k8s_masters_no_floating_ip_no_etcd
number_of_k8s_nodes = var.number_of_k8s_nodes
number_of_bastions = var.number_of_bastions
number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip
number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip
k8s_masters = var.k8s_masters
k8s_nodes = var.k8s_nodes
bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb
etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb
master_root_volume_size_in_gb = var.master_root_volume_size_in_gb
node_root_volume_size_in_gb = var.node_root_volume_size_in_gb
gfs_root_volume_size_in_gb = var.gfs_root_volume_size_in_gb
gfs_volume_size_in_gb = var.gfs_volume_size_in_gb
master_volume_type = var.master_volume_type
node_volume_type = var.node_volume_type
public_key_path = var.public_key_path
image = var.image
image_uuid = var.image_uuid
image_gfs = var.image_gfs
image_master = var.image_master
image_master_uuid = var.image_master_uuid
image_gfs_uuid = var.image_gfs_uuid
ssh_user = var.ssh_user
ssh_user_gfs = var.ssh_user_gfs
flavor_k8s_master = var.flavor_k8s_master
flavor_k8s_node = var.flavor_k8s_node
flavor_etcd = var.flavor_etcd
flavor_gfs_node = var.flavor_gfs_node
network_name = var.network_name
flavor_bastion = var.flavor_bastion
k8s_master_fips = module.ips.k8s_master_fips
k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips
k8s_masters_fips = module.ips.k8s_masters_fips
k8s_node_fips = module.ips.k8s_node_fips
k8s_nodes_fips = module.ips.k8s_nodes_fips
bastion_fips = module.ips.bastion_fips
bastion_allowed_remote_ips = var.bastion_allowed_remote_ips
master_allowed_remote_ips = var.master_allowed_remote_ips
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
k8s_allowed_egress_ips = var.k8s_allowed_egress_ips
supplementary_master_groups = var.supplementary_master_groups
supplementary_node_groups = var.supplementary_node_groups
master_allowed_ports = var.master_allowed_ports
worker_allowed_ports = var.worker_allowed_ports
bastion_allowed_ports = var.bastion_allowed_ports
use_access_ip = var.use_access_ip
master_server_group_policy = var.master_server_group_policy
node_server_group_policy = var.node_server_group_policy
etcd_server_group_policy = var.etcd_server_group_policy
extra_sec_groups = var.extra_sec_groups
extra_sec_groups_name = var.extra_sec_groups_name
group_vars_path = var.group_vars_path
port_security_enabled = var.port_security_enabled
force_null_port_security = var.force_null_port_security
network_router_id = module.network.router_id
network_id = module.network.network_id
use_existing_network = var.use_existing_network
private_subnet_id = module.network.subnet_id
additional_server_groups = var.additional_server_groups
depends_on = [
module.network.subnet_id
]
}
output "private_subnet_id" {
value = module.network.subnet_id
}
output "floating_network_id" {
value = var.external_net
}
output "router_id" {
value = module.network.router_id
}
output "k8s_master_fips" {
value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address]
}
output "k8s_node_fips" {
value = var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address]
}
output "bastion_fips" {
value = module.ips.bastion_fips
}

View File

@@ -0,0 +1 @@
ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"

View File

@@ -0,0 +1,971 @@
data "openstack_images_image_v2" "vm_image" {
count = var.image_uuid == "" ? 1 : 0
most_recent = true
name = var.image
}
data "openstack_images_image_v2" "gfs_image" {
count = var.image_gfs_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0
most_recent = true
name = var.image_gfs == "" ? var.image : var.image_gfs
}
data "openstack_images_image_v2" "image_master" {
count = var.image_master_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0
name = var.image_master == "" ? var.image : var.image_master
}
data "cloudinit_config" "cloudinit" {
part {
content_type = "text/cloud-config"
content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
extra_partitions = [],
netplan_critical_dhcp_interface = ""
})
}
}
data "openstack_networking_network_v2" "k8s_network" {
count = var.use_existing_network ? 1 : 0
name = var.network_name
}
resource "openstack_compute_keypair_v2" "k8s" {
name = "kubernetes-${var.cluster_name}"
public_key = chomp(file(var.public_key_path))
}
resource "openstack_networking_secgroup_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master"
description = "${var.cluster_name} - Kubernetes Master"
delete_default_rules = true
}
resource "openstack_networking_secgroup_v2" "k8s_master_extra" {
count = "%{if var.extra_sec_groups}1%{else}0%{endif}"
name = "${var.cluster_name}-k8s-master-${var.extra_sec_groups_name}"
description = "${var.cluster_name} - Kubernetes Master nodes - rules not managed by terraform"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
count = length(var.master_allowed_remote_ips)
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "6443"
port_range_max = "6443"
remote_ip_prefix = var.master_allowed_remote_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.k8s_master.id
}
resource "openstack_networking_secgroup_rule_v2" "k8s_master_ports" {
count = length(var.master_allowed_ports)
direction = "ingress"
ethertype = "IPv4"
protocol = lookup(var.master_allowed_ports[count.index], "protocol", "tcp")
port_range_min = lookup(var.master_allowed_ports[count.index], "port_range_min")
port_range_max = lookup(var.master_allowed_ports[count.index], "port_range_max")
remote_ip_prefix = lookup(var.master_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
security_group_id = openstack_networking_secgroup_v2.k8s_master.id
}
resource "openstack_networking_secgroup_v2" "bastion" {
name = "${var.cluster_name}-bastion"
count = var.number_of_bastions != "" ? 1 : 0
description = "${var.cluster_name} - Bastion Server"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "bastion" {
count = var.number_of_bastions != "" ? length(var.bastion_allowed_remote_ips) : 0
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "22"
port_range_max = "22"
remote_ip_prefix = var.bastion_allowed_remote_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}
resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
count = length(var.bastion_allowed_ports)
direction = "ingress"
ethertype = "IPv4"
protocol = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
port_range_min = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
port_range_max = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
remote_ip_prefix = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}
resource "openstack_networking_secgroup_v2" "k8s" {
name = "${var.cluster_name}-k8s"
description = "${var.cluster_name} - Kubernetes"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "k8s" {
direction = "ingress"
ethertype = "IPv4"
remote_group_id = openstack_networking_secgroup_v2.k8s.id
security_group_id = openstack_networking_secgroup_v2.k8s.id
}
resource "openstack_networking_secgroup_rule_v2" "k8s_allowed_remote_ips" {
count = length(var.k8s_allowed_remote_ips)
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = "22"
port_range_max = "22"
remote_ip_prefix = var.k8s_allowed_remote_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.k8s.id
}
resource "openstack_networking_secgroup_rule_v2" "egress" {
count = length(var.k8s_allowed_egress_ips)
direction = "egress"
ethertype = "IPv4"
remote_ip_prefix = var.k8s_allowed_egress_ips[count.index]
security_group_id = openstack_networking_secgroup_v2.k8s.id
}
resource "openstack_networking_secgroup_v2" "worker" {
name = "${var.cluster_name}-k8s-worker"
description = "${var.cluster_name} - Kubernetes worker nodes"
delete_default_rules = true
}
resource "openstack_networking_secgroup_v2" "worker_extra" {
count = "%{if var.extra_sec_groups}1%{else}0%{endif}"
name = "${var.cluster_name}-k8s-worker-${var.extra_sec_groups_name}"
description = "${var.cluster_name} - Kubernetes worker nodes - rules not managed by terraform"
delete_default_rules = true
}
resource "openstack_networking_secgroup_rule_v2" "worker" {
count = length(var.worker_allowed_ports)
direction = "ingress"
ethertype = "IPv4"
protocol = lookup(var.worker_allowed_ports[count.index], "protocol", "tcp")
port_range_min = lookup(var.worker_allowed_ports[count.index], "port_range_min")
port_range_max = lookup(var.worker_allowed_ports[count.index], "port_range_max")
remote_ip_prefix = lookup(var.worker_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
security_group_id = openstack_networking_secgroup_v2.worker.id
}
resource "openstack_compute_servergroup_v2" "k8s_master" {
count = var.master_server_group_policy != "" ? 1 : 0
name = "k8s-master-srvgrp"
policies = [var.master_server_group_policy]
}
resource "openstack_compute_servergroup_v2" "k8s_node" {
count = var.node_server_group_policy != "" ? 1 : 0
name = "k8s-node-srvgrp"
policies = [var.node_server_group_policy]
}
resource "openstack_compute_servergroup_v2" "k8s_etcd" {
count = var.etcd_server_group_policy != "" ? 1 : 0
name = "k8s-etcd-srvgrp"
policies = [var.etcd_server_group_policy]
}
resource "openstack_compute_servergroup_v2" "k8s_node_additional" {
for_each = var.additional_server_groups
name = "k8s-${each.key}-srvgrp"
policies = [each.value.policy]
}
locals {
# master groups
master_sec_groups = compact([
openstack_networking_secgroup_v2.k8s_master.id,
openstack_networking_secgroup_v2.k8s.id,
var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].id : "",
])
# worker groups
worker_sec_groups = compact([
openstack_networking_secgroup_v2.k8s.id,
openstack_networking_secgroup_v2.worker.id,
var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].id : "",
])
# bastion groups
bastion_sec_groups = compact(concat([
openstack_networking_secgroup_v2.k8s.id,
openstack_networking_secgroup_v2.bastion[0].id,
]))
# etcd groups
etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
# glusterfs groups
gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
# Image uuid
image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
# Image_gfs uuid
image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id
# image_master uuid
image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
k8s_nodes_settings = {
for name, node in var.k8s_nodes :
name => {
"use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb) == 0,
"image_id" = node.image_id != null ? node.image_id : local.image_to_use_node,
"volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb,
"volume_type" = node.volume_type != null ? node.volume_type : var.node_volume_type,
"network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
"server_group" = node.server_group != null ? [openstack_compute_servergroup_v2.k8s_node_additional[node.server_group].id] : (var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : [])
}
}
k8s_masters_settings = {
for name, node in var.k8s_masters :
name => {
"use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb) == 0,
"image_id" = node.image_id != null ? node.image_id : local.image_to_use_master,
"volume_size" = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb,
"volume_type" = node.volume_type != null ? node.volume_type : var.master_volume_type,
"network_id" = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
}
}
}
resource "openstack_networking_port_v2" "bastion_port" {
count = var.number_of_bastions
name = "${var.cluster_name}-bastion-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "bastion" {
name = "${var.cluster_name}-bastion-${count.index + 1}"
count = var.number_of_bastions
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_bastion
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
content {
uuid = local.image_to_use_node
source_type = "image"
volume_size = var.bastion_root_volume_size_in_gb
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "bastion"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
provisioner "local-exec" {
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${var.bastion_fips[0]}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
}
}
resource "openstack_networking_port_v2" "k8s_master_port" {
count = var.number_of_k8s_masters
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_master" {
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
count = var.number_of_k8s_masters
availability_zone = element(var.az_list, count.index)
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
content {
uuid = local.image_to_use_master
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
provisioner "local-exec" {
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
}
}
resource "openstack_networking_port_v2" "k8s_masters_port" {
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
name = "${var.cluster_name}-k8s-${each.key}"
network_id = local.k8s_masters_settings[each.key].network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_masters" {
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
name = "${var.cluster_name}-k8s-${each.key}"
availability_zone = each.value.az
image_id = local.k8s_masters_settings[each.key].use_local_disk ? local.k8s_masters_settings[each.key].image_id : null
flavor_id = each.value.flavor
key_pair = openstack_compute_keypair_v2.k8s.name
dynamic "block_device" {
for_each = !local.k8s_masters_settings[each.key].use_local_disk ? [local.k8s_masters_settings[each.key].image_id] : []
content {
uuid = block_device.value
source_type = "image"
volume_size = local.k8s_masters_settings[each.key].volume_size
volume_type = local.k8s_masters_settings[each.key].volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = openstack_networking_port_v2.k8s_masters_port[each.key].id
}
dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
provisioner "local-exec" {
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.module}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
}
}
resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
count = var.number_of_k8s_masters_no_etcd
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
count = var.number_of_k8s_masters_no_etcd
availability_zone = element(var.az_list, count.index)
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
content {
uuid = local.image_to_use_master
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
provisioner "local-exec" {
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
}
}
resource "openstack_networking_port_v2" "etcd_port" {
count = var.number_of_etcd
name = "${var.cluster_name}-etcd-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "etcd" {
name = "${var.cluster_name}-etcd-${count.index + 1}"
count = var.number_of_etcd
availability_zone = element(var.az_list, count.index)
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_etcd
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
content {
uuid = local.image_to_use_master
source_type = "image"
volume_size = var.etcd_root_volume_size_in_gb
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.etcd_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_etcd[0].id
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "etcd,no_floating"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
}
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
count = var.number_of_k8s_masters_no_floating_ip
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
count = var.number_of_k8s_masters_no_floating_ip
availability_zone = element(var.az_list, count.index)
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
content {
uuid = local.image_to_use_master
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
}
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" {
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
availability_zone = element(var.az_list, count.index)
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
flavor_id = var.flavor_k8s_master
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
content {
uuid = local.image_to_use_master
source_type = "image"
volume_size = var.master_root_volume_size_in_gb
volume_type = var.master_volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_master[0].id
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
}
resource "openstack_networking_port_v2" "k8s_node_port" {
count = var.number_of_k8s_nodes
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_node" {
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
count = var.number_of_k8s_nodes
availability_zone = element(var.az_list_node, count.index)
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
content {
uuid = local.image_to_use_node
source_type = "image"
volume_size = var.node_root_volume_size_in_gb
volume_type = var.node_volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_node[0].id
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
provisioner "local-exec" {
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
}
}
resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
count = var.number_of_k8s_nodes_no_floating_ip
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
count = var.number_of_k8s_nodes_no_floating_ip
availability_zone = element(var.az_list_node, count.index)
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
flavor_id = var.flavor_k8s_node
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
content {
uuid = local.image_to_use_node
source_type = "image"
volume_size = var.node_root_volume_size_in_gb
volume_type = var.node_volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : []
content {
group = scheduler_hints.value
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
}
resource "openstack_networking_port_v2" "k8s_nodes_port" {
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
name = "${var.cluster_name}-k8s-node-${each.key}"
network_id = local.k8s_nodes_settings[each.key].network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "k8s_nodes" {
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
name = "${var.cluster_name}-k8s-node-${each.key}"
availability_zone = each.value.az
image_id = local.k8s_nodes_settings[each.key].use_local_disk ? local.k8s_nodes_settings[each.key].image_id : null
flavor_id = each.value.flavor
key_pair = openstack_compute_keypair_v2.k8s.name
user_data = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
extra_partitions = each.value.cloudinit.extra_partitions,
netplan_critical_dhcp_interface = each.value.cloudinit.netplan_critical_dhcp_interface,
}) : data.cloudinit_config.cloudinit.rendered
dynamic "block_device" {
for_each = !local.k8s_nodes_settings[each.key].use_local_disk ? [local.k8s_nodes_settings[each.key].image_id] : []
content {
uuid = block_device.value
source_type = "image"
volume_size = local.k8s_nodes_settings[each.key].volume_size
volume_type = local.k8s_nodes_settings[each.key].volume_type
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = openstack_networking_port_v2.k8s_nodes_port[each.key].id
}
dynamic "scheduler_hints" {
for_each = local.k8s_nodes_settings[each.key].server_group
content {
group = scheduler_hints.value
}
}
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}${each.value.extra_groups != null ? ",${each.value.extra_groups}" : ""}"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
provisioner "local-exec" {
command = "%{if each.value.floating_ip}sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
}
}
resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
count = var.number_of_gfs_nodes_no_floating_ip
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
admin_state_up = "true"
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
no_security_groups = var.port_security_enabled ? null : false
dynamic "fixed_ip" {
for_each = var.private_subnet_id == "" ? [] : [true]
content {
subnet_id = var.private_subnet_id
}
}
depends_on = [
var.network_router_id
]
}
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
count = var.number_of_gfs_nodes_no_floating_ip
availability_zone = element(var.az_list, count.index)
image_name = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
flavor_id = var.flavor_gfs_node
key_pair = openstack_compute_keypair_v2.k8s.name
dynamic "block_device" {
for_each = var.gfs_root_volume_size_in_gb > 0 ? [local.image_to_use_gfs] : []
content {
uuid = local.image_to_use_gfs
source_type = "image"
volume_size = var.gfs_root_volume_size_in_gb
boot_index = 0
destination_type = "volume"
delete_on_termination = true
}
}
network {
port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index)
}
dynamic "scheduler_hints" {
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
content {
group = openstack_compute_servergroup_v2.k8s_node[0].id
}
}
metadata = {
ssh_user = var.ssh_user_gfs
kubespray_groups = "gfs-cluster,network-storage,no_floating"
depends_on = var.network_router_id
use_access_ip = var.use_access_ip
}
}
resource "openstack_networking_floatingip_associate_v2" "bastion" {
count = var.number_of_bastions
floating_ip = var.bastion_fips[count.index]
port_id = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
}
resource "openstack_networking_floatingip_associate_v2" "k8s_master" {
count = var.number_of_k8s_masters
floating_ip = var.k8s_master_fips[count.index]
port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
}
resource "openstack_networking_floatingip_associate_v2" "k8s_masters" {
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
floating_ip = var.k8s_masters_fips[each.key].address
port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id
}
resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" {
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
floating_ip = var.k8s_master_no_etcd_fips[count.index]
port_id = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
}
resource "openstack_networking_floatingip_associate_v2" "k8s_node" {
count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0
floating_ip = var.k8s_node_fips[count.index]
port_id = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
}
resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
floating_ip = var.k8s_nodes_fips[each.key].address
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
}
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
description = "Non-ephemeral volume for GlusterFS"
size = var.gfs_volume_size_in_gb
}
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
count = var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0
instance_id = element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)
volume_id = element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)
}

View File

@@ -0,0 +1,54 @@
%{~ if length(extra_partitions) > 0 || netplan_critical_dhcp_interface != "" }
#cloud-config
bootcmd:
%{~ for idx, partition in extra_partitions }
- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, ${partition.volume_path} ]
- [ cloud-init-per, once, create-part-${idx}, parted, --script, ${partition.volume_path}, 'mkpart extended ext4 ${partition.partition_start} ${partition.partition_end}' ]
- [ cloud-init-per, once, create-fs-part-${idx}, mkfs.ext4, ${partition.partition_path} ]
%{~ endfor }
runcmd:
%{~ if netplan_critical_dhcp_interface != "" }
- netplan apply
%{~ endif }
%{~ for idx, partition in extra_partitions }
- mkdir -p ${partition.mount_path}
- chown nobody:nogroup ${partition.mount_path}
- mount ${partition.partition_path} ${partition.mount_path}
%{~ endfor ~}
%{~ if netplan_critical_dhcp_interface != "" }
write_files:
- path: /etc/netplan/90-critical-dhcp.yaml
content: |
network:
version: 2
ethernets:
${ netplan_critical_dhcp_interface }:
dhcp4: true
critical: true
%{~ endif }
mounts:
%{~ for idx, partition in extra_partitions }
- [ ${partition.partition_path}, ${partition.mount_path} ]
%{~ endfor }
%{~ else ~}
# yamllint disable rule:comments
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
#ssh_pwauth: yes
#chpasswd:
# list: |
# root:secret
# expire: False
## in some cases direct root ssh access via ssh key is required
#disable_root: false
## in some cases additional CA certs are required
#ca-certs:
# trusted: |
# -----BEGIN CERTIFICATE-----
%{~ endif }
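
The template above only emits the partitioning, mount, and netplan sections when a node entry supplies a cloudinit object; otherwise it falls back to the mostly commented-out default. A sketch of such an entry, following the object schema declared in the module's k8s_nodes variable further below — device paths, flavor, AZ, and mount point are placeholders:

```hcl
k8s_nodes = {
  "node-1" = {
    az          = "nova"        # placeholder availability zone
    flavor      = "<flavor-id>" # placeholder flavor ID
    floating_ip = false
    cloudinit = {
      extra_partitions = [
        {
          volume_path     = "/dev/vdb"      # placeholder: the extra volume to partition
          partition_path  = "/dev/vdb1"     # placeholder: partition created by the bootcmd above
          partition_start = "0%"
          partition_end   = "100%"
          mount_path      = "/var/lib/data" # placeholder mount point
        }
      ]
      netplan_critical_dhcp_interface = "" # set to an interface name to render the netplan snippet
    }
  }
}
```
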

View File

@@ -0,0 +1,235 @@
variable "cluster_name" {}
variable "az_list" {
type = list(string)
}
variable "az_list_node" {
type = list(string)
}
variable "number_of_k8s_masters" {}
variable "number_of_k8s_masters_no_etcd" {}
variable "number_of_etcd" {}
variable "number_of_k8s_masters_no_floating_ip" {}
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {}
variable "number_of_k8s_nodes" {}
variable "number_of_k8s_nodes_no_floating_ip" {}
variable "number_of_bastions" {}
variable "number_of_gfs_nodes_no_floating_ip" {}
variable "bastion_root_volume_size_in_gb" {}
variable "etcd_root_volume_size_in_gb" {}
variable "master_root_volume_size_in_gb" {}
variable "node_root_volume_size_in_gb" {}
variable "gfs_root_volume_size_in_gb" {}
variable "gfs_volume_size_in_gb" {}
variable "master_volume_type" {}
variable "node_volume_type" {}
variable "public_key_path" {}
variable "image" {}
variable "image_gfs" {}
variable "ssh_user" {}
variable "ssh_user_gfs" {}
variable "flavor_k8s_master" {}
variable "flavor_k8s_node" {}
variable "flavor_etcd" {}
variable "flavor_gfs_node" {}
variable "network_name" {}
variable "flavor_bastion" {}
variable "network_id" {
default = ""
}
variable "use_existing_network" {
type = bool
}
variable "network_router_id" {
default = ""
}
variable "k8s_master_fips" {
type = list
}
variable "k8s_master_no_etcd_fips" {
type = list
}
variable "k8s_node_fips" {
type = list
}
variable "k8s_masters_fips" {
type = map
}
variable "k8s_nodes_fips" {
type = map
}
variable "bastion_fips" {
type = list
}
variable "bastion_allowed_remote_ips" {
type = list
}
variable "master_allowed_remote_ips" {
type = list
}
variable "k8s_allowed_remote_ips" {
type = list
}
variable "k8s_allowed_egress_ips" {
type = list
}
variable "k8s_masters" {
type = map(object({
az = string
flavor = string
floating_ip = bool
etcd = bool
image_id = optional(string)
root_volume_size_in_gb = optional(number)
volume_type = optional(string)
network_id = optional(string)
}))
}
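
For reference, a k8s_masters map that satisfies this schema could look like the following in cluster.tfvars; AZ names and flavor IDs are placeholders. Note that the map-driven k8s_masters resources earlier in this diff are only created when all of the number_of_k8s_masters* counters are zero:

```hcl
k8s_masters = {
  "master-1" = {
    az          = "nova"        # placeholder availability zone
    flavor      = "<flavor-id>" # placeholder flavor ID
    floating_ip = true
    etcd        = true
  }
  "master-2" = {
    az          = "nova"
    flavor      = "<flavor-id>"
    floating_ip = false
    etcd        = true
  }
}
```
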
variable "k8s_nodes" {
type = map(object({
az = string
flavor = string
floating_ip = bool
extra_groups = optional(string)
image_id = optional(string)
root_volume_size_in_gb = optional(number)
volume_type = optional(string)
network_id = optional(string)
additional_server_groups = optional(list(string))
server_group = optional(string)
cloudinit = optional(object({
extra_partitions = optional(list(object({
volume_path = string
partition_path = string
partition_start = string
partition_end = string
mount_path = string
})), [])
netplan_critical_dhcp_interface = optional(string, "")
}))
}))
}
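
Similarly, a k8s_nodes map exercising a few of the optional attributes might look like this (all concrete values are placeholders; extra_groups is appended to kubespray_groups for that node, and the cloudinit options are shown in the example after the template above):

```hcl
k8s_nodes = {
  "node-1" = {
    az          = "nova"        # placeholder availability zone
    flavor      = "<flavor-id>" # placeholder flavor ID
    floating_ip = true
  }
  "node-2" = {
    az                     = "nova"
    flavor                 = "<flavor-id>"
    floating_ip            = false
    extra_groups           = "calico_rr" # extra Ansible group appended to kubespray_groups
    root_volume_size_in_gb = 80          # boot from a volume instead of the local disk
  }
}
```
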
variable "additional_server_groups" {
type = map(object({
policy = string
}))
}
variable "supplementary_master_groups" {
default = ""
}
variable "supplementary_node_groups" {
default = ""
}
variable "master_allowed_ports" {
type = list
}
variable "worker_allowed_ports" {
type = list
}
variable "bastion_allowed_ports" {
type = list
}
variable "use_access_ip" {}
variable "master_server_group_policy" {
type = string
}
variable "node_server_group_policy" {
type = string
}
variable "etcd_server_group_policy" {
type = string
}
variable "extra_sec_groups" {
type = bool
}
variable "extra_sec_groups_name" {
type = string
}
variable "image_uuid" {
type = string
}
variable "image_gfs_uuid" {
type = string
}
variable "image_master" {
type = string
}
variable "image_master_uuid" {
type = string
}
variable "group_vars_path" {
type = string
}
variable "port_security_enabled" {
type = bool
}
variable "force_null_port_security" {
type = bool
}
variable "private_subnet_id" {
type = string
}

View File

@@ -0,0 +1,8 @@
terraform {
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
}
}
required_version = ">= 1.3.0"
}

View File

@@ -0,0 +1,46 @@
resource "null_resource" "dummy_dependency" {
triggers = {
dependency_id = var.router_id
}
depends_on = [
var.router_internal_port_id
]
}
# If the user specifies pre-existing IPs in k8s_master_fips, do not create new ones.
resource "openstack_networking_floatingip_v2" "k8s_master" {
count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
}
resource "openstack_networking_floatingip_v2" "k8s_masters" {
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
}
# If the user specifies pre-existing IPs in k8s_master_fips, do not create new ones.
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
}
resource "openstack_networking_floatingip_v2" "k8s_node" {
count = var.number_of_k8s_nodes
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
}
resource "openstack_networking_floatingip_v2" "bastion" {
count = length(var.bastion_fips) > 0 ? 0 : var.number_of_bastions
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
}
resource "openstack_networking_floatingip_v2" "k8s_nodes" {
for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
pool = var.floatingip_pool
depends_on = [null_resource.dummy_dependency]
}
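
Because of the count expressions above, passing non-empty k8s_master_fips or bastion_fips lists suppresses floating-IP creation and the supplied addresses are reused as-is. A hedged cluster.tfvars fragment, assuming the root configuration exposes variables of the same name as these module inputs (the addresses are placeholders):

```hcl
# Reuse pre-allocated floating IPs instead of creating new ones.
k8s_master_fips = ["198.51.100.10", "198.51.100.11"]
bastion_fips    = ["198.51.100.20"]
```
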

View File

@@ -0,0 +1,25 @@
# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
output "k8s_master_fips" {
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address
}
output "k8s_masters_fips" {
value = openstack_networking_floatingip_v2.k8s_masters
}
# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
output "k8s_master_no_etcd_fips" {
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
}
output "k8s_node_fips" {
value = openstack_networking_floatingip_v2.k8s_node[*].address
}
output "k8s_nodes_fips" {
value = openstack_networking_floatingip_v2.k8s_nodes
}
output "bastion_fips" {
value = length(var.bastion_fips) > 0 ? var.bastion_fips : openstack_networking_floatingip_v2.bastion[*].address
}
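
These outputs exist so the root configuration can hand the floating IPs to the compute module, whose variables (k8s_master_fips, bastion_fips, k8s_node_fips, and so on) appear earlier in this diff. A sketch of that wiring, assuming a conventional module layout — the module name and source path are assumptions, as the root configuration is not part of this excerpt:

```hcl
module "compute" {
  source          = "./modules/compute" # assumed path
  k8s_master_fips = module.ips.k8s_master_fips
  bastion_fips    = module.ips.bastion_fips
  k8s_node_fips   = module.ips.k8s_node_fips
  # ... remaining inputs omitted
}
```
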

View File

@@ -0,0 +1,27 @@
variable "number_of_k8s_masters" {}
variable "number_of_k8s_masters_no_etcd" {}
variable "number_of_k8s_nodes" {}
variable "floatingip_pool" {}
variable "number_of_bastions" {}
variable "external_net" {}
variable "network_name" {}
variable "router_id" {
default = ""
}
variable "k8s_masters" {}
variable "k8s_nodes" {}
variable "k8s_master_fips" {}
variable "bastion_fips" {}
variable "router_internal_port_id" {}

View File

@@ -0,0 +1,11 @@
terraform {
required_providers {
null = {
source = "hashicorp/null"
}
openstack = {
source = "terraform-provider-openstack/openstack"
}
}
required_version = ">= 0.12.26"
}

Some files were not shown because too many files have changed in this diff.