Add kubespray 2.24

변정훈
2024-02-16 17:08:09 +09:00
parent 1fa9b0df4b
commit f69d904725
1423 changed files with 89069 additions and 2 deletions


@@ -0,0 +1,27 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["ec2:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["elasticloadbalancing:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
}
]
}


@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}


@@ -0,0 +1,45 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::kubernetes-*"
]
},
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["route53:*"],
"Resource": ["*"]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}


@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
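Each policy/trust-document pair above defines one IAM role (one for the control-plane/master instances, one for the nodes). As a rough sketch, assuming hypothetical file names for the four documents above, the roles and instance profiles could be created with the AWS CLI like so:

```shell
# hypothetical file names; adjust to wherever you saved the documents
aws iam create-role --role-name kubernetes-master \
  --assume-role-policy-document file://kubernetes-master-role.json
aws iam put-role-policy --role-name kubernetes-master \
  --policy-name kubernetes-master \
  --policy-document file://kubernetes-master-policy.json
aws iam create-instance-profile --instance-profile-name kubernetes-master
aws iam add-role-to-instance-profile \
  --instance-profile-name kubernetes-master --role-name kubernetes-master
# repeat analogously for the node role with the second policy/trust pair
```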


@@ -0,0 +1,81 @@
#!/usr/bin/env python
from __future__ import print_function
import boto3
import os
import argparse
import json
class SearchEC2Tags(object):
def __init__(self):
self.parse_args()
if self.args.list:
self.search_tags()
if self.args.host:
data = {}
print(json.dumps(data, indent=2))
def parse_args(self):
##Check if VPC_VISIBILITY is set, if not default to private
if "VPC_VISIBILITY" in os.environ:
self.vpc_visibility = os.environ['VPC_VISIBILITY']
else:
self.vpc_visibility = "private"
##Support --list and --host flags. We largely ignore the host one.
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true', default=False, help='List instances')
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def search_tags(self):
hosts = {}
hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube_control_plane", "kube_node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
region = os.environ['AWS_REGION']
ec2 = boto3.resource('ec2', region)
filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
cluster_name = os.getenv('CLUSTER_NAME')
if cluster_name:
filters.append({'Name': 'tag-key', 'Values': ['kubernetes.io/cluster/'+cluster_name]})
instances = ec2.instances.filter(Filters=filters)
for instance in instances:
##Suppose default vpc_visibility is private
dns_name = instance.private_dns_name
ansible_host = {
'ansible_ssh_host': instance.private_ip_address
}
##Override when vpc_visibility actually is public
if self.vpc_visibility == "public":
dns_name = instance.public_dns_name
ansible_host = {
'ansible_ssh_host': instance.public_ip_address
}
##Set when instance actually has node_labels
node_labels_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-labels', instance.tags))
if node_labels_tag:
ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])
##Set when instance actually has node_taints
node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
if node_taints_tag:
ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])
hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host
hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
print(json.dumps(hosts, sort_keys=True, indent=2))
SearchEC2Tags()
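As a usage sketch for the dynamic inventory above (the script file name is hypothetical; region and visibility values are illustrative):

```shell
# AWS_REGION is required; VPC_VISIBILITY defaults to "private";
# CLUSTER_NAME optionally narrows the search to one cluster's tag
export AWS_REGION=eu-west-1
export VPC_VISIBILITY=private
./kubespray-aws-inventory.py --list        # prints the generated inventory JSON
ansible-playbook -i ./kubespray-aws-inventory.py cluster.yml -b
```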


@@ -0,0 +1 @@
boto3 # Apache-2.0

contrib/azurerm/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
.generated
/inventory

contrib/azurerm/README.md Normal file

@@ -0,0 +1,67 @@
# Kubernetes on Azure with Azure Resource Group Templates
Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates)
## Status
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself; that has to be done in a later step by yourself (using kubespray, of course).
## Requirements
- [Install azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
- [Login with azure-cli](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest)
- Dedicated Resource Group created in the Azure Portal or through azure-cli
## Configuration through group_vars/all
You have to modify at least two variables in group_vars/all. The first is the **cluster_name** variable; it must be globally
unique due to some restrictions in Azure. The second is the **ssh_public_keys** variable; it must be set to your ssh public
key so you can access your Azure virtual machines. Most other variables should be self-explanatory if you have some basic
Kubernetes experience.
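For instance, a minimal sketch of the required changes, assuming you keep the shipped layout of group_vars/all (the cluster name below is illustrative):

```shell
# cluster_name seeds globally unique resource names (e.g. the storage account),
# so pick something unlikely to collide
sed -i 's/^cluster_name: .*/cluster_name: k8s-example-weu01/' group_vars/all
# then replace the placeholder ssh_public_keys entry with your own key
$EDITOR group_vars/all
```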
## Bastion host
You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs.
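Once the bastion is up, you can typically hop through it with OpenSSH's jump-host option. A sketch with illustrative names (the FQDN follows the optional bastion_domain_prefix pattern from group_vars/all, and 10.0.4.x is the default masters subnet):

```shell
ssh -J devops@k8s-bastion.westeurope.cloudapp.azure.com devops@10.0.4.4
```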
## Generating and applying
To generate and apply the templates, call:
```shell
./apply-rg.sh <resource_group_name>
```
If you change something in the configuration (e.g. the number of nodes) later, you can call this again and Azure will
take care of creating/modifying whatever is needed.
## Clearing a resource group
If you need to delete all resources from a resource group, simply call:
```shell
./clear-rg.sh <resource_group_name>
```
**WARNING** this really deletes everything from your resource group, including anything you later created there yourself!
## Installing Ansible and the dependencies
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
## Generating an inventory for kubespray
After you have applied the templates, you can generate an inventory with this call:
```shell
./generate-inventory.sh <resource_group_name>
```
It will create the file ./inventory which can then be used with kubespray, e.g.:
```shell
cd kubespray-root-dir
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```

contrib/azurerm/apply-rg.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
az deployment group create --template-file ./.generated/network.json -g "$AZURE_RESOURCE_GROUP"
az deployment group create --template-file ./.generated/storage.json -g "$AZURE_RESOURCE_GROUP"
az deployment group create --template-file ./.generated/availability-sets.json -g "$AZURE_RESOURCE_GROUP"
az deployment group create --template-file ./.generated/bastion.json -g "$AZURE_RESOURCE_GROUP"
az deployment group create --template-file ./.generated/masters.json -g "$AZURE_RESOURCE_GROUP"
az deployment group create --template-file ./.generated/minions.json -g "$AZURE_RESOURCE_GROUP"

contrib/azurerm/clear-rg.sh Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
ansible-playbook generate-templates.yml
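# clear-rg.json is an empty ARM template; deploying it with --mode Complete
# makes Azure delete every resource in the group that the template does not
# declare, i.e. everything in the resource group.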
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -e
AZURE_RESOURCE_GROUP="$1"
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
echo "AZURE_RESOURCE_GROUP is missing"
exit 1
fi
# check if azure cli 2.0 exists else use azure cli 1.0
if az &>/dev/null; then
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
elif azure &>/dev/null; then
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
else
echo "Azure cli not found"
fi


@@ -0,0 +1,6 @@
---
- name: Generate Azure inventory
hosts: localhost
gather_facts: False
roles:
- generate-inventory


@@ -0,0 +1,6 @@
---
- name: Generate Azure inventory
hosts: localhost
gather_facts: False
roles:
- generate-inventory_2


@@ -0,0 +1,6 @@
---
- name: Generate Azure templates
hosts: localhost
gather_facts: False
roles:
- generate-templates


@@ -0,0 +1,51 @@
# Due to some Azure limitations (e.g. a Storage Account's name must be globally unique),
# this name must be globally unique - it will be used as a prefix for azure components
cluster_name: example
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
# node that can be used to access the masters and minions.
use_bastion: false
# Set this to a preferred name that will be used as the first part of the DNS name for your bastion host. For example: k8s-bastion.<azureregion>.cloudapp.azure.com.
# This is convenient when exceptions have to be configured on a firewall to allow ssh to the given bastion host.
# bastion_domain_prefix: k8s-bastion
number_of_k8s_masters: 3
number_of_k8s_nodes: 3
masters_vm_size: Standard_A2
masters_os_disk_size: 1000
minions_vm_size: Standard_A2
minions_os_disk_size: 1000
admin_username: devops
admin_password: changeme
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
ssh_public_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
# Disable ssh password authentication. Change it to false to allow connecting to ssh with a password
disablePasswordAuthentication: true
# Azure CIDRs
azure_vnet_cidr: 10.0.0.0/8
azure_admin_cidr: 10.241.2.0/24
azure_masters_cidr: 10.0.4.0/24
azure_minions_cidr: 10.240.0.0/16
# Azure loadbalancer port to use to access your cluster
kube_apiserver_port: 6443
# Azure networking and storage naming to use with inventory/all.yml
#azure_virtual_network_name: KubeVNET
#azure_subnet_admin_name: ad-subnet
#azure_subnet_masters_name: master-subnet
#azure_subnet_minions_name: minion-subnet
#azure_route_table_name: routetable
#azure_security_group_name: secgroup
# Storage types available are: "Standard_LRS","Premium_LRS"
#azure_storage_account_type: Standard_LRS


@@ -0,0 +1,15 @@
---
- name: Query Azure VMs
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd
- name: Set vm_list
set_fact:
vm_list: "{{ vm_list_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644


@@ -0,0 +1,33 @@
{% for vm in vm_list %}
{% if not use_bastion or vm.name == 'bastion' %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% else %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% endif %}
{% endfor %}
[kube_control_plane]
{% for vm in vm_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[etcd]
{% for vm in vm_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[kube_node]
{% for vm in vm_list %}
{% if 'kube_node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[k8s_cluster:children]
kube_node
kube_control_plane


@@ -0,0 +1,31 @@
---
- name: Query Azure VMs IPs
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd
- name: Query Azure VMs Roles
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd
- name: Query Azure Load Balancer Public IP
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd
- name: Set VM IP, roles lists and load balancer public IP
set_fact:
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
vm_roles_list: "{{ vm_list_cmd.stdout }}"
lb_pubip: "{{ lb_pubip_cmd.stdout }}"
- name: Generate inventory
template:
src: inventory.j2
dest: "{{ playbook_dir }}/inventory"
mode: 0644
- name: Generate Load Balancer variables
template:
src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
mode: 0644


@@ -0,0 +1,33 @@
{% for vm in vm_ip_list %}
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% else %}
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
{% endif %}
{% endfor %}
[kube_control_plane]
{% for vm in vm_roles_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[etcd]
{% for vm in vm_roles_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[kube_node]
{% for vm in vm_roles_list %}
{% if 'kube_node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
[k8s_cluster:children]
kube_node
kube_control_plane


@@ -0,0 +1,8 @@
## External LB example config
apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }}
loadbalancer_apiserver:
address: {{ lb_pubip.ipAddress }}
port: 6443
## Internal loadbalancers for apiservers
loadbalancer_apiserver_localhost: false


@@ -0,0 +1,37 @@
---
apiVersion: "2015-06-15"
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
nameSuffix: "{{ cluster_name }}"
availabilitySetMasters: "master-avs"
availabilitySetMinions: "minion-avs"
faultDomainCount: 3
updateDomainCount: 10
bastionVmSize: Standard_A0
bastionVMName: bastion
bastionIPAddressName: bastion-pubip
disablePasswordAuthentication: true
sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
imageReference:
publisher: "OpenLogic"
offer: "CentOS"
sku: "7.5"
version: "latest"
imageReferenceJson: "{{ imageReference | to_json }}"
storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"


@@ -0,0 +1,25 @@
---
- name: Set base_dir
set_fact:
base_dir: "{{ playbook_dir }}/.generated/"
- name: Create base_dir
file:
path: "{{ base_dir }}"
state: directory
recurse: true
mode: 0755
- name: Store json files in base_dir
template:
src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}"
mode: 0644
with_items:
- network.json
- storage.json
- availability-sets.json
- bastion.json
- masters.json
- minions.json
- clear-rg.json


@@ -0,0 +1,30 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
},
"resources": [
{
"type": "Microsoft.Compute/availabilitySets",
"name": "{{availabilitySetMasters}}",
"apiVersion": "{{apiVersion}}",
"location": "[resourceGroup().location]",
"properties": {
"PlatformFaultDomainCount": "{{faultDomainCount}}",
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
}
},
{
"type": "Microsoft.Compute/availabilitySets",
"name": "{{availabilitySetMinions}}",
"apiVersion": "{{apiVersion}}",
"location": "[resourceGroup().location]",
"properties": {
"PlatformFaultDomainCount": "{{faultDomainCount}}",
"PlatformUpdateDomainCount": "{{updateDomainCount}}"
}
}
]
}


@@ -0,0 +1,106 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
"subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]"
},
"resources": [
{% if use_bastion %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "{{bastionIPAddressName}}",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "Static",
"dnsSettings": {
{% if bastion_domain_prefix %}
"domainNameLabel": "{{ bastion_domain_prefix }}"
{% endif %}
}
}
},
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkInterfaces",
"name": "{{bastionVMName}}-nic",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]"
],
"properties": {
"ipConfigurations": [
{
"name": "BastionIpConfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]"
},
"subnet": {
"id": "[variables('subnetAdminRef')]"
}
}
}
]
}
},
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Compute/virtualMachines",
"name": "{{bastionVMName}}",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]"
],
"tags": {
"roles": "bastion"
},
"properties": {
"hardwareProfile": {
"vmSize": "{{bastionVmSize}}"
},
"osProfile": {
"computerName": "{{bastionVMName}}",
"adminUsername": "{{admin_username}}",
"adminPassword": "{{admin_password}}",
"linuxConfiguration": {
"disablePasswordAuthentication": "true",
"ssh": {
"publicKeys": [
{% for key in ssh_public_keys %}
{
"path": "{{sshKeyPath}}",
"keyData": "{{key}}"
}{% if loop.index < ssh_public_keys | length %},{% endif %}
{% endfor %}
]
}
}
},
"storageProfile": {
"imageReference": {{imageReferenceJson}},
"osDisk": {
"name": "osdisk",
"vhd": {
"uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]"
},
"caching": "ReadWrite",
"createOption": "FromImage"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]"
}
]
}
}
}
{% endif %}
]
}


@@ -0,0 +1,8 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [],
"outputs": {}
}


@@ -0,0 +1,198 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
"lbDomainName": "{{nameSuffix}}-api",
"lbPublicIPAddressName": "kubernetes-api-pubip",
"lbPublicIPAddressType": "Static",
"lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]",
"lbName": "kubernetes-api",
"lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]",
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
"kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]"
},
"resources": [
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "[variables('lbPublicIPAddressName')]",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]",
"dnsSettings": {
"domainNameLabel": "[variables('lbDomainName')]"
}
}
},
{
"apiVersion": "{{apiVersion}}",
"name": "[variables('lbName')]",
"type": "Microsoft.Network/loadBalancers",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]"
],
"properties": {
"frontendIPConfigurations": [
{
"name": "kube-api-frontend",
"properties": {
"publicIPAddress": {
"id": "[variables('lbPublicIPAddressID')]"
}
}
}
],
"backendAddressPools": [
{
"name": "kube-api-backend"
}
],
"loadBalancingRules": [
{
"name": "kube-api",
"properties": {
"frontendIPConfiguration": {
"id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]"
},
"backendAddressPool": {
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
},
"protocol": "tcp",
"frontendPort": "{{kube_apiserver_port}}",
"backendPort": "{{kube_apiserver_port}}",
"enableFloatingIP": false,
"idleTimeoutInMinutes": 5,
"probe": {
"id": "[concat(variables('lbID'), '/probes/kube-api')]"
}
}
}
],
"probes": [
{
"name": "kube-api",
"properties": {
"protocol": "tcp",
"port": "{{kube_apiserver_port}}",
"intervalInSeconds": 5,
"numberOfProbes": 2
}
}
]
}
},
{% for i in range(number_of_k8s_masters) %}
{% if not use_bastion %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "master-{{i}}-pubip",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "Static"
}
},
{% endif %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkInterfaces",
"name": "master-{{i}}-nic",
"location": "[resourceGroup().location]",
"dependsOn": [
{% if not use_bastion %}
"[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]",
{% endif %}
"[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]"
],
"properties": {
"ipConfigurations": [
{
"name": "MastersIpConfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
{% if not use_bastion %}
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]"
},
{% endif %}
"subnet": {
"id": "[variables('kubeMastersSubnetRef')]"
},
"loadBalancerBackendAddressPools": [
{
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
}
]
}
}
],
"networkSecurityGroup": {
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
},
"enableIPForwarding": true
}
},
{
"type": "Microsoft.Compute/virtualMachines",
"name": "master-{{i}}",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
],
"tags": {
"roles": "kube_control_plane,etcd"
},
"apiVersion": "{{apiVersion}}",
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]"
},
"hardwareProfile": {
"vmSize": "{{masters_vm_size}}"
},
"osProfile": {
"computerName": "master-{{i}}",
"adminUsername": "{{admin_username}}",
"adminPassword": "{{admin_password}}",
"linuxConfiguration": {
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
"ssh": {
"publicKeys": [
{% for key in ssh_public_keys %}
{
"path": "{{sshKeyPath}}",
"keyData": "{{key}}"
}{% if loop.index < ssh_public_keys | length %},{% endif %}
{% endfor %}
]
}
}
},
"storageProfile": {
"imageReference": {{imageReferenceJson}},
"osDisk": {
"name": "ma{{nameSuffix}}{{i}}",
"vhd": {
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]"
},
"caching": "ReadWrite",
"createOption": "FromImage",
"diskSizeGB": "{{masters_os_disk_size}}"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]"
}
]
}
}
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}


@@ -0,0 +1,115 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
"vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
"kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]"
},
"resources": [
{% for i in range(number_of_k8s_nodes) %}
{% if not use_bastion %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/publicIPAddresses",
"name": "minion-{{i}}-pubip",
"location": "[resourceGroup().location]",
"properties": {
"publicIPAllocationMethod": "Static"
}
},
{% endif %}
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkInterfaces",
"name": "minion-{{i}}-nic",
"location": "[resourceGroup().location]",
"dependsOn": [
{% if not use_bastion %}
"[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]"
{% endif %}
],
"properties": {
"ipConfigurations": [
{
"name": "MinionsIpConfig",
"properties": {
"privateIPAllocationMethod": "Dynamic",
{% if not use_bastion %}
"publicIPAddress": {
"id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]"
},
{% endif %}
"subnet": {
"id": "[variables('kubeMinionsSubnetRef')]"
}
}
}
],
"networkSecurityGroup": {
"id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
},
"enableIPForwarding": true
}
},
{
"type": "Microsoft.Compute/virtualMachines",
"name": "minion-{{i}}",
"location": "[resourceGroup().location]",
"dependsOn": [
"[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
],
"tags": {
"roles": "kube_node"
},
"apiVersion": "{{apiVersion}}",
"properties": {
"availabilitySet": {
"id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]"
},
"hardwareProfile": {
"vmSize": "{{minions_vm_size}}"
},
"osProfile": {
"computerName": "minion-{{i}}",
"adminUsername": "{{admin_username}}",
"adminPassword": "{{admin_password}}",
"linuxConfiguration": {
"disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
"ssh": {
"publicKeys": [
{% for key in ssh_public_keys %}
{
"path": "{{sshKeyPath}}",
"keyData": "{{key}}"
}{% if loop.index < ssh_public_keys | length %},{% endif %}
{% endfor %}
]
}
}
},
"storageProfile": {
"imageReference": {{imageReferenceJson}},
"osDisk": {
"name": "mi{{nameSuffix}}{{i}}",
"vhd": {
"uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]"
},
"caching": "ReadWrite",
"createOption": "FromImage",
"diskSizeGB": "{{minions_os_disk_size}}"
}
},
"networkProfile": {
"networkInterfaces": [
{
"id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]"
}
]
}
}
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}


@@ -0,0 +1,109 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
},
"resources": [
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/routeTables",
"name": "{{routeTableName}}",
"location": "[resourceGroup().location]",
"properties": {
"routes": [
]
}
},
{
"type": "Microsoft.Network/virtualNetworks",
"name": "{{virtualNetworkName}}",
"location": "[resourceGroup().location]",
"apiVersion": "{{apiVersion}}",
"dependsOn": [
"[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]"
],
"properties": {
"addressSpace": {
"addressPrefixes": [
"{{azure_vnet_cidr}}"
]
},
"subnets": [
{
"name": "{{subnetMastersName}}",
"properties": {
"addressPrefix": "{{azure_masters_cidr}}",
"routeTable": {
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
}
}
},
{
"name": "{{subnetMinionsName}}",
"properties": {
"addressPrefix": "{{azure_minions_cidr}}",
"routeTable": {
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
}
}
}
{% if use_bastion %}
,{
"name": "{{subnetAdminName}}",
"properties": {
"addressPrefix": "{{azure_admin_cidr}}",
"routeTable": {
"id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
}
}
}
{% endif %}
]
}
},
{
"apiVersion": "{{apiVersion}}",
"type": "Microsoft.Network/networkSecurityGroups",
"name": "{{securityGroupName}}",
"location": "[resourceGroup().location]",
"properties": {
"securityRules": [
{% if not use_bastion %}
{
"name": "ssh",
"properties": {
"description": "Allow SSH",
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "22",
"sourceAddressPrefix": "Internet",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 100,
"direction": "Inbound"
}
},
{% endif %}
{
"name": "kube-api",
"properties": {
"description": "Allow secure kube-api",
"protocol": "Tcp",
"sourcePortRange": "*",
"destinationPortRange": "{{kube_apiserver_port}}",
"sourceAddressPrefix": "Internet",
"destinationAddressPrefix": "*",
"access": "Allow",
"priority": 101,
"direction": "Inbound"
}
}
]
},
"resources": [],
"dependsOn": []
}
]
}


@@ -0,0 +1,19 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
},
"variables": {
},
"resources": [
{
"type": "Microsoft.Storage/storageAccounts",
"name": "{{storageAccountName}}",
"location": "[resourceGroup().location]",
"apiVersion": "{{apiVersion}}",
"properties": {
"accountType": "{{storageAccountType}}"
}
}
]
}

contrib/dind/README.md Normal file

@@ -0,0 +1,177 @@
# Kubespray DIND experimental setup
This ansible playbook creates local docker containers
to serve as Kubernetes "nodes", which in turn will run
"normal" Kubernetes docker containers, a mode usually
called DIND (Docker-IN-Docker).
The playbook has two roles:
- dind-host: creates the "nodes" as containers in localhost, with
appropriate settings for DIND (privileged, volume mapping for dind
storage, etc).
- dind-cluster: customizes each node container to have required
system packages installed, and some utils (swapoff, lsattr)
symlinked to /bin/true to ease mimicking a real node.
This playbook has been tested with Ubuntu 16.04 as the host and ubuntu:16.04
as the docker image (note that dind-cluster has specific customization
for these images).
The playbook also creates a `/tmp/kubespray.dind.inventory_builder.sh`
helper (which wraps running `contrib/inventory_builder/inventory.py` with
the node containers' IPs and prefix).
## Deploying
See below for a complete successful run:
1. Create the node containers
```shell
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt
ansible-playbook -i hosts dind-cluster.yaml
# Back to kubespray root
cd ../..
```
NOTE: if the playbook run fails with an error message like the one below,
you may need to explicitly set `ansible_python_interpreter`;
see the `./hosts` file for an example expanded localhost entry.
```shell
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
```
2. Customize kubespray-dind.yaml
Note that there is coupling between the node containers created above
and the `kubespray-dind.yaml` settings, in particular regarding the selected `node_distro`
(as set in `group_vars/all/all.yaml`) and the docker settings.
```shell
$EDITOR contrib/dind/kubespray-dind.yaml
```
3. Prepare the inventory and run the playbook
```shell
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
```
NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per the command lines below,
replacing `DISTRO` with either `debian`, `ubuntu`, `centos`, or `fedora`:
```shell
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO
cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
```
## Resulting deployment
See below to get an idea of what a completed deployment looks like,
from the host where you ran the kubespray playbooks.
### node_distro: debian
Running from an Ubuntu Xenial host:
```shell
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24 15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1835dd183b75 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node5
30b0af8d2924 debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node4
3e0d1510c62f debian:9.5 "sh -c 'apt-get -qy …" 43 minutes ago Up 43 minutes kube-node3
738993566f94 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node2
c581ef662ed2 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node1
$ docker exec kube-node1 kubectl get node
NAME STATUS ROLES AGE VERSION
kube-node1 Ready master,node 18m v1.12.1
kube-node2 Ready master,node 17m v1.12.1
kube-node3 Ready node 17m v1.12.1
kube-node4 Ready node 17m v1.12.1
kube-node5 Ready node 17m v1.12.1
$ docker exec kube-node1 kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default netchecker-agent-67489 1/1 Running 0 2m51s
default netchecker-agent-6qq6s 1/1 Running 0 2m51s
default netchecker-agent-fsw92 1/1 Running 0 2m51s
default netchecker-agent-fw6tl 1/1 Running 0 2m51s
default netchecker-agent-hostnet-8f2zb 1/1 Running 0 3m
default netchecker-agent-hostnet-gq7ml 1/1 Running 0 3m
default netchecker-agent-hostnet-jfkgv 1/1 Running 0 3m
default netchecker-agent-hostnet-kwfwx 1/1 Running 0 3m
default netchecker-agent-hostnet-r46nm 1/1 Running 0 3m
default netchecker-agent-lxdrn 1/1 Running 0 2m51s
default netchecker-server-864bd4c897-9vstl 1/1 Running 0 2m40s
default sh-68fcc6db45-qf55h 1/1 Running 1 12m
kube-system coredns-7598f59475-6vknq 1/1 Running 0 14m
kube-system coredns-7598f59475-l5q5x 1/1 Running 0 14m
kube-system kube-apiserver-kube-node1 1/1 Running 0 17m
kube-system kube-apiserver-kube-node2 1/1 Running 0 18m
kube-system kube-controller-manager-kube-node1 1/1 Running 0 18m
kube-system kube-controller-manager-kube-node2 1/1 Running 0 18m
kube-system kube-proxy-5xx9d 1/1 Running 0 17m
kube-system kube-proxy-cdqq4 1/1 Running 0 17m
kube-system kube-proxy-n64ls 1/1 Running 0 17m
kube-system kube-proxy-pswmj 1/1 Running 0 18m
kube-system kube-proxy-x89qw 1/1 Running 0 18m
kube-system kube-scheduler-kube-node1 1/1 Running 4 17m
kube-system kube-scheduler-kube-node2 1/1 Running 4 18m
kube-system kubernetes-dashboard-5db4d9f45f-548rl 1/1 Running 0 14m
kube-system nginx-proxy-kube-node3 1/1 Running 4 17m
kube-system nginx-proxy-kube-node4 1/1 Running 4 17m
kube-system nginx-proxy-kube-node5 1/1 Running 4 17m
kube-system weave-net-42bfr 2/2 Running 0 16m
kube-system weave-net-6gt8m 2/2 Running 0 16m
kube-system weave-net-88nnc 2/2 Running 0 16m
kube-system weave-net-shckr 2/2 Running 0 16m
kube-system weave-net-xr46t 2/2 Running 0 16m
$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
```
## Using ./run-test-distros.sh
You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script gives an idea:
```shell
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
# 'kube_network_plugin=calico'
# 'kube_network_plugin=flannel'
# 'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
```
See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
set of CNI-specific `--extra-vars` combos.
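As a sketch, a minimal spec file and run could look like this (the file name and values are illustrative):

```shell
cat > my-spec.env <<'EOF'
DISTROS=(debian)
EXTRAS=(
  'kube_network_plugin=calico'
)
EOF
./run-test-distros.sh my-spec.env   # writes ./out/my-spec.env-01.out by default
```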


@@ -0,0 +1,11 @@
---
- name: Create nodes as docker containers
hosts: localhost
gather_facts: False
roles:
- { role: dind-host }
- name: Customize each node container
hosts: containers
roles:
- { role: dind-cluster }


@@ -0,0 +1,3 @@
---
# See distro.yaml for supported node_distro images
node_distro: debian


@@ -0,0 +1,41 @@
---
distro_settings:
debian: &DEBIAN
image: "debian:9.5"
user: "debian"
pid1_exe: /lib/systemd/systemd
init: |
sh -c "apt-get -qy update && apt-get -qy install systemd-sysv dbus && exec /sbin/init"
raw_setup: apt-get -qy update && apt-get -qy install dbus python sudo iproute2
raw_setup_done: test -x /usr/bin/sudo
agetty_svc: getty@*
ssh_service: ssh
extra_packages: []
ubuntu:
<<: *DEBIAN
image: "ubuntu:16.04"
user: "ubuntu"
init: |
/sbin/init
centos: &CENTOS
image: "centos:7"
user: "centos"
pid1_exe: /usr/lib/systemd/systemd
init: |
/sbin/init
raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables
raw_setup_done: test -x /usr/bin/sudo
agetty_svc: getty@* serial-getty@*
ssh_service: sshd
extra_packages: []
fedora:
<<: *CENTOS
image: "fedora:latest"
user: "fedora"
raw_setup: yum -qy install policycoreutils dbus python sudo iproute iptables; mkdir -p /etc/modules-load.d
extra_packages:
- hostname
- procps
- findutils
- kmod
- iputils

contrib/dind/hosts Normal file

@@ -0,0 +1,15 @@
[local]
# If you created a virtualenv for ansible, you may need to specify running the
# python binary from there instead:
#localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python
localhost ansible_connection=local
[containers]
kube-node1
kube-node2
kube-node3
kube-node4
kube-node5
[containers:vars]
ansible_connection=docker


@@ -0,0 +1,22 @@
---
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true
kubelet_fail_swap_on: false
# Docker nodes need to have been created with the same "node_distro: debian"
# as set in contrib/dind/group_vars/all/all.yaml
bootstrap_os: debian
docker_version: latest
docker_storage_options: -s overlay2 --storage-opt overlay2.override_kernel_check=true -g /dind/docker
dns_mode: coredns
deploy_netchecker: True
netcheck_agent_image_repo: quay.io/l23network/k8s-netchecker-agent
netcheck_server_image_repo: quay.io/l23network/k8s-netchecker-server
netcheck_agent_image_tag: v1.0
netcheck_server_image_tag: v1.0


@@ -0,0 +1 @@
docker


@@ -0,0 +1,73 @@
---
- name: Set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"
- name: Set_fact other distro settings
set_fact:
distro_user: "{{ distro_setup['user'] }}"
distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
distro_extra_packages: "{{ distro_setup['extra_packages'] }}"
- name: Null-ify some linux tools to ease DIND
file:
src: "/bin/true"
dest: "{{ item }}"
state: link
force: yes
with_items:
# DIND box may have swap enabled, don't bother
- /sbin/swapoff
# /etc/hosts handling would fail on trying to copy file attributes on edit,
# void it by successfully returning nil output
- /usr/bin/lsattr
# disable selinux-isms, especially needed if running on a non-SELinux host
- /usr/sbin/semodule
- name: Void installing dpkg docs and man pages on Debian based distros
copy:
content: |
# Delete locales
path-exclude=/usr/share/locale/*
# Delete man pages
path-exclude=/usr/share/man/*
# Delete docs
path-exclude=/usr/share/doc/*
path-include=/usr/share/doc/*/copyright
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
mode: 0644
when:
- ansible_os_family == 'Debian'
- name: Install system packages to better match a full-fledged node
package:
name: "{{ item }}"
state: present
with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
- name: Start needed services
service:
name: "{{ item }}"
state: started
with_items:
- rsyslog
- "{{ distro_ssh_service }}"
- name: Create distro user "{{ distro_user }}"
user:
name: "{{ distro_user }}"
uid: 1000
# groups: sudo
append: yes
- name: Allow password-less sudo to "{{ distro_user }}"
copy:
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
dest: "/etc/sudoers.d/{{ distro_user }}"
mode: 0640
- name: "Add my pubkey to {{ distro_user }} user authorized keys"
ansible.posix.authorized_key:
user: "{{ distro_user }}"
state: present
key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"


@@ -0,0 +1,87 @@
---
- name: Set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"
- name: Set_fact other distro settings
set_fact:
distro_image: "{{ distro_setup['image'] }}"
distro_init: "{{ distro_setup['init'] }}"
distro_pid1_exe: "{{ distro_setup['pid1_exe'] }}"
distro_raw_setup: "{{ distro_setup['raw_setup'] }}"
distro_raw_setup_done: "{{ distro_setup['raw_setup_done'] }}"
distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"
- name: Create dind node containers from "containers" inventory section
community.docker.docker_container:
image: "{{ distro_image }}"
name: "{{ item }}"
state: started
hostname: "{{ item }}"
command: "{{ distro_init }}"
# recreate: yes
privileged: true
tmpfs:
- /sys/module/nf_conntrack/parameters
volumes:
- /boot:/boot
- /lib/modules:/lib/modules
- "{{ item }}:/dind/docker"
register: containers
with_items: "{{ groups.containers }}"
tags:
- addresses
- name: Gather list of containers IPs
set_fact:
addresses: "{{ containers.results | map(attribute='ansible_facts') | map(attribute='docker_container') | map(attribute='NetworkSettings') | map(attribute='IPAddress') | list }}"
tags:
- addresses
- name: Create inventory_builder helper already set with the list of node containers' IPs
template:
src: inventory_builder.sh.j2
dest: /tmp/kubespray.dind.inventory_builder.sh
mode: 0755
tags:
- addresses
- name: Install needed packages into node containers via raw, need to wait for possible systemd packages to finish installing
raw: |
# agetty processes churn a lot of cpu time failing on nonexistent ttys; STOP them early, to reap them in the task below
pkill -STOP agetty || true
{{ distro_raw_setup_done }} && echo SKIPPED && exit 0
until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
{{ distro_raw_setup }}
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
with_items: "{{ containers.results }}"
register: result
changed_when: result.stdout.find("SKIPPED") < 0
- name: Remove gettys from node containers
raw: |
until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
systemctl disable {{ distro_agetty_svc }}
systemctl stop {{ distro_agetty_svc }}
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
with_items: "{{ containers.results }}"
changed_when: false
# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id
cmp /etc/machine-id /etc/machine-id~ || true
systemctl daemon-reload
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
with_items: "{{ containers.results }}"
- name: Early hack image install to adapt for DIND
raw: |
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
with_items: "{{ containers.results }}"
register: result
changed_when: result.stdout.find("removed") >= 0


@@ -0,0 +1,3 @@
#!/bin/bash
# NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section
HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %}


@@ -0,0 +1,93 @@
#!/bin/bash
# Q&D test'em all: creates full DIND kubespray deploys
# for each distro, verifying it via netchecker.
info() {
local msg="$*"
local date="$(date -Isec)"
echo "INFO: [$date] $msg"
}
pass_or_fail() {
local rc="$?"
local msg="$*"
local date="$(date -Isec)"
[ $rc -eq 0 ] && echo "PASS: [$date] $msg" || echo "FAIL: [$date] $msg"
return $rc
}
test_distro() {
local distro=${1:?};shift
local extra="${*:-}"
local prefix="${distro}[${extra}]"
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
pass_or_fail "$prefix: dind-nodes" || return 1
(cd ../..
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
# expand $extra with -e in front of each word
extra_args=""; for extra_arg in $extra; do extra_args="$extra_args -e $extra_arg"; done
ansible-playbook --become -e ansible_ssh_user=$distro -i \
${INVENTORY_DIR}/hosts.ini cluster.yml \
-e @contrib/dind/kubespray-dind.yaml -e bootstrap_os=$distro ${extra_args}
pass_or_fail "$prefix: kubespray"
) || return 1
local node0=${NODES[0]}
docker exec ${node0} kubectl get pod --all-namespaces
pass_or_fail "$prefix: kube-api" || return 1
let retries=60
while ((retries--)); do
# Some CNI may set NodePort on "main" node interface address (thus no localhost NodePort)
# e.g. kube-router: https://github.com/cloudnativelabs/kube-router/pull/217
docker exec ${node0} curl -m2 -s http://${NETCHECKER_HOST:?}:31081/api/v1/connectivity_check | grep successfully && break
sleep 2
done
[ $retries -ge 0 ]
pass_or_fail "$prefix: netcheck" || return 1
}
NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost
: ${OUTPUT_DIR:=./out}
mkdir -p ${OUTPUT_DIR}
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
# 'kube_network_plugin=calico'
# 'kube_network_plugin=flannel'
# 'kube_network_plugin=weave'
# )
# that will be tested in a "combinatory" way (e.g. from above there'll
# be 6 test runs), creating a sequenced <spec_filename>-nn.out with each output.
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
SPECS=${*:?Missing SPEC files, e.g. test-most_distros-some_CNIs.env}
for spec in ${SPECS}; do
unset DISTROS EXTRAS
echo "Loading file=${spec} ..."
. ${spec} || continue
: ${DISTROS:?} || continue
echo "DISTROS:" "${DISTROS[@]}"
echo "EXTRAS->"
printf " %s\n" "${EXTRAS[@]}"
let n=1
for distro in "${DISTROS[@]}"; do
for extra in "${EXTRAS[@]:-NULL}"; do
# Magic value to let this for run once:
[[ ${extra} == NULL ]] && unset extra
docker rm -f "${NODES[@]}"
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
{
info "${distro}[${extra}] START: file_out=${file_out}"
time test_distro ${distro} ${extra}
} |& tee ${file_out}
# sleep so a human can inspect the results if they want
sleep 2m
done
done
done
egrep -H '^(....:|real)' $(ls -tr ${OUTPUT_DIR}/*.out)


@@ -0,0 +1,11 @@
# Test spec file: used from ./run-test-distros.sh, will run
# each distro in $DISTROS overloading main kubespray ansible-playbook run
# Get all DISTROS from distro.yaml (shame no yaml parsing, but nuff anyway)
# DISTROS="${*:-$(egrep -o '^ \w+' group_vars/all/distro.yaml|paste -s)}"
DISTROS=(debian ubuntu centos fedora)
# Each line below will be added as --extra-vars to main playbook run
EXTRAS=(
'kube_network_plugin=calico'
'kube_network_plugin=weave'
)


@@ -0,0 +1,6 @@
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
)


@@ -0,0 +1,8 @@
DISTROS=(debian centos)
EXTRAS=(
'kube_network_plugin=calico {}'
'kube_network_plugin=canal {}'
'kube_network_plugin=cilium {}'
'kube_network_plugin=flannel {}'
'kube_network_plugin=weave {}'
)


@@ -0,0 +1,480 @@
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage: inventory.py ip1 [ip2 ...]
# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
#
# Advanced usage:
# Add another host after initial creation: inventory.py 10.10.1.5
# Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
# Add hosts with different ip and access ip:
# inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.1.3
# Add hosts with a specific hostname, ip, and optional access ip:
# inventory.py first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
# Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1
#
# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml
# YAML file should be in the following format:
# group1:
# host1:
# ip: X.X.X.X
# var: val
# group2:
# host2:
# ip: X.X.X.X
from collections import OrderedDict
from ipaddress import ip_address
from ruamel.yaml import YAML
import os
import re
import subprocess
import sys
ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
'calico_rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
'load', 'add']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
yaml.Representer.add_representer(OrderedDict, yaml.Representer.represent_dict)
def get_var_as_bool(name, default):
value = os.environ.get(name, '')
return _boolean_states.get(value.lower(), default)
# Configurable as shell vars start
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
# Remove the reference to KUBE_MASTERS after some deprecation cycles.
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
os.environ.get("KUBE_MASTERS", 2)))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
USE_REAL_HOSTNAME = get_var_as_bool("USE_REAL_HOSTNAME", False)
# Configurable as shell vars end
class KubesprayInventory(object):
def __init__(self, changed_hosts=None, config_file=None):
self.config_file = config_file
self.yaml_config = {}
loadPreviousConfig = False
printHostnames = False
# See whether there are any commands to process
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
if changed_hosts[0] == "add":
loadPreviousConfig = True
changed_hosts = changed_hosts[1:]
elif changed_hosts[0] == "print_hostnames":
loadPreviousConfig = True
printHostnames = True
else:
self.parse_command(changed_hosts[0], changed_hosts[1:])
sys.exit(0)
# If the user wants to remove a node, we need to load the config anyway
if changed_hosts and changed_hosts[0][0] == "-":
loadPreviousConfig = True
if self.config_file and loadPreviousConfig: # Load previous YAML file
try:
self.hosts_file = open(config_file, 'r')
self.yaml_config = yaml.load(self.hosts_file)
except OSError as e:
# I am assuming we are catching "cannot open file" exceptions
print(e)
sys.exit(1)
if printHostnames:
self.print_hostnames()
sys.exit(0)
self.ensure_required_groups(ROLES)
if changed_hosts:
changed_hosts = self.range2ips(changed_hosts)
self.hosts = self.build_hostnames(changed_hosts,
loadPreviousConfig)
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
self.set_all(self.hosts)
self.set_k8s_cluster()
etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_kube_control_plane(list(self.hosts.keys())[
etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
else:
self.set_kube_control_plane(
list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
self.set_kube_node(self.hosts.keys())
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
else: # Show help if no options
self.show_help()
sys.exit(0)
self.write_config(self.config_file)
def write_config(self, config_file):
if config_file:
with open(self.config_file, 'w') as f:
yaml.dump(self.yaml_config, f)
else:
print("WARNING: Unable to save config. Make sure you set "
"CONFIG_FILE env var.")
def debug(self, msg):
if DEBUG:
print("DEBUG: {0}".format(msg))
def get_ip_from_opts(self, optstring):
if 'ip' in optstring:
return optstring['ip']
else:
raise ValueError("IP parameter not found in options")
def ensure_required_groups(self, groups):
for group in groups:
if group == 'all':
self.debug("Adding group {0}".format(group))
if group not in self.yaml_config:
all_dict = OrderedDict([('hosts', OrderedDict({})),
('children', OrderedDict({}))])
self.yaml_config = {'all': all_dict}
else:
self.debug("Adding group {0}".format(group))
if group not in self.yaml_config['all']['children']:
self.yaml_config['all']['children'][group] = {'hosts': {}}
def get_host_id(self, host):
'''Returns integer host ID (without padding) from a given hostname.'''
try:
short_hostname = host.split('.')[0]
return int(re.findall("\\d+$", short_hostname)[-1])
except IndexError:
raise ValueError("Host name must end in an integer")
# Keeps already specified hosts,
# and adds or removes the hosts provided as an argument
def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
existing_hosts = OrderedDict()
highest_host_id = 0
# Load already existing hosts from the YAML
if loadPreviousConfig:
try:
for host in self.yaml_config['all']['hosts']:
# Read configuration of an existing host
hostConfig = self.yaml_config['all']['hosts'][host]
existing_hosts[host] = hostConfig
# If the existing host seems
# to have been created automatically, detect its ID
if host.startswith(HOST_PREFIX):
host_id = self.get_host_id(host)
if host_id > highest_host_id:
highest_host_id = host_id
except Exception as e:
# I am assuming we are catching automatically
# created hosts without IDs
print(e)
sys.exit(1)
# FIXME(mattymo): Fix condition where delete then add reuses highest id
next_host_id = highest_host_id + 1
next_host = ""
all_hosts = existing_hosts.copy()
for host in changed_hosts:
# Delete the host from config if the hostname/IP has a "-" prefix
if host[0] == "-":
realhost = host[1:]
if self.exists_hostname(all_hosts, realhost):
self.debug("Marked {0} for deletion.".format(realhost))
all_hosts.pop(realhost)
elif self.exists_ip(all_hosts, realhost):
self.debug("Marked {0} for deletion.".format(realhost))
self.delete_host_by_ip(all_hosts, realhost)
# Host/Argument starts with a digit,
# then we assume it's an IP address
elif host[0].isdigit():
if ',' in host:
ip, access_ip = host.split(',')
else:
ip = host
access_ip = host
if self.exists_hostname(all_hosts, host):
self.debug("Skipping existing host {0}.".format(host))
continue
elif self.exists_ip(all_hosts, ip):
self.debug("Skipping existing host {0}.".format(ip))
continue
if USE_REAL_HOSTNAME:
cmd = ("ssh -oStrictHostKeyChecking=no "
+ access_ip + " 'hostname -s'")
next_host = subprocess.check_output(cmd, shell=True)
next_host = next_host.strip().decode('ascii')
else:
# Generates a hostname because we have only an IP address
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
next_host_id += 1
# Uses the automatically generated node name
# in case we don't provide one.
all_hosts[next_host] = {'ansible_host': access_ip,
'ip': ip,
'access_ip': access_ip}
# Host/Argument starts with a letter, then we assume it's a hostname
elif host[0].isalpha():
if ',' in host:
try:
hostname, ip, access_ip = host.split(',')
except Exception:
hostname, ip = host.split(',')
access_ip = ip
if self.exists_hostname(all_hosts, host):
self.debug("Skipping existing host {0}.".format(host))
continue
elif self.exists_ip(all_hosts, ip):
self.debug("Skipping existing host {0}.".format(ip))
continue
all_hosts[hostname] = {'ansible_host': access_ip,
'ip': ip,
'access_ip': access_ip}
return all_hosts
# Expand IP ranges into individual addresses
def range2ips(self, hosts):
reworked_hosts = []
def ips(start_address, end_address):
try:
# Python 3.x
start = int(ip_address(start_address))
end = int(ip_address(end_address))
except Exception:
# Python 2.7
start = int(ip_address(str(start_address)))
end = int(ip_address(str(end_address)))
return [ip_address(ip).exploded for ip in range(start, end + 1)]
for host in hosts:
if '-' in host and not (host.startswith('-') or host[0].isalpha()):
start, end = host.strip().split('-')
try:
reworked_hosts.extend(ips(start, end))
except ValueError:
raise Exception("Range of ip_addresses isn't valid")
else:
reworked_hosts.append(host)
return reworked_hosts
def exists_hostname(self, existing_hosts, hostname):
return hostname in existing_hosts.keys()
def exists_ip(self, existing_hosts, ip):
for host_opts in existing_hosts.values():
if ip == self.get_ip_from_opts(host_opts):
return True
return False
def delete_host_by_ip(self, existing_hosts, ip):
for hostname, host_opts in existing_hosts.items():
if ip == self.get_ip_from_opts(host_opts):
del existing_hosts[hostname]
return
raise ValueError("Unable to find host by IP: {0}".format(ip))
def purge_invalid_hosts(self, hostnames, protected_names=[]):
for role in self.yaml_config['all']['children']:
if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa
all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa
for host in all_hosts.keys():
if host not in hostnames and host not in protected_names:
self.debug(
"Host {0} removed from role {1}".format(host, role)) # noqa
del self.yaml_config['all']['children'][role]['hosts'][host] # noqa
# purge from all
if self.yaml_config['all']['hosts']:
all_hosts = self.yaml_config['all']['hosts'].copy()
for host in all_hosts.keys():
if host not in hostnames and host not in protected_names:
self.debug("Host {0} removed from role all".format(host))
del self.yaml_config['all']['hosts'][host]
def add_host_to_group(self, group, host, opts=""):
self.debug("adding host {0} to group {1}".format(host, group))
if group == 'all':
if self.yaml_config['all']['hosts'] is None:
self.yaml_config['all']['hosts'] = {host: None}
self.yaml_config['all']['hosts'][host] = opts
elif group != 'k8s_cluster:children':
if self.yaml_config['all']['children'][group]['hosts'] is None:
self.yaml_config['all']['children'][group]['hosts'] = {
host: None}
else:
self.yaml_config['all']['children'][group]['hosts'][host] = None # noqa
def set_kube_control_plane(self, hosts):
for host in hosts:
self.add_host_to_group('kube_control_plane', host)
def set_all(self, hosts):
for host, opts in hosts.items():
self.add_host_to_group('all', host, opts)
def set_k8s_cluster(self):
k8s_cluster = {'children': {'kube_control_plane': None,
'kube_node': None}}
self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
def set_calico_rr(self, hosts):
for host in hosts:
if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
self.debug("Not adding {0} to calico_rr group because it "
"conflicts with kube_control_plane "
"group".format(host))
continue
if host in self.yaml_config['all']['children']['kube_node']:
self.debug("Not adding {0} to calico_rr group because it "
"conflicts with kube_node group".format(host))
continue
self.add_host_to_group('calico_rr', host)
def set_kube_node(self, hosts):
for host in hosts:
if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa
self.debug("Not adding {0} to kube_node group because of "
"scale deployment and host is in etcd "
"group.".format(host))
continue
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa
self.debug("Not adding {0} to kube_node group because of "
"scale deployment and host is in "
"kube_control_plane group.".format(host))
continue
self.add_host_to_group('kube_node', host)
def set_etcd(self, hosts):
for host in hosts:
self.add_host_to_group('etcd', host)
def load_file(self, files=None):
'''Directly loads JSON to inventory.'''
if not files:
raise Exception("No input file specified.")
import json
for filename in list(files):
# Try JSON
try:
with open(filename, 'r') as f:
data = json.load(f)
except ValueError:
raise Exception("Cannot read %s as JSON, or CSV", filename)
self.ensure_required_groups(ROLES)
self.set_k8s_cluster()
for group, hosts in data.items():
self.ensure_required_groups([group])
for host, opts in hosts.items():
optstring = {'ansible_host': opts['ip'],
'ip': opts['ip'],
'access_ip': opts['ip']}
self.add_host_to_group('all', host, optstring)
self.add_host_to_group(group, host)
self.write_config(self.config_file)
def parse_command(self, command, args=None):
if command == 'help':
self.show_help()
elif command == 'print_cfg':
self.print_config()
elif command == 'print_ips':
self.print_ips()
elif command == 'print_hostnames':
self.print_hostnames()
elif command == 'load':
self.load_file(args)
else:
raise Exception("Invalid command specified.")
def show_help(self):
help_text = '''Usage: inventory.py ip1 [ip2 ...]
Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
Available commands:
help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
add - Adds specified hosts into an already existing inventory
Advanced usage:
Create new or overwrite old inventory file: inventory.py 10.10.1.5
Add another host after initial creation: inventory.py add 10.10.1.6
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1
Configurable env vars:
DEBUG Enable debug printing. Default: True
CONFIG_FILE File to write config to. Default: ./inventory/sample/hosts.yaml
HOST_PREFIX Host prefix for generated hosts. Default: node
KUBE_CONTROL_HOSTS Set the number of kube-control-planes. Default: 2
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
''' # noqa
print(help_text)
def print_config(self):
yaml.dump(self.yaml_config, sys.stdout)
def print_hostnames(self):
print(' '.join(self.yaml_config['all']['hosts'].keys()))
def print_ips(self):
ips = []
for host, opts in self.yaml_config['all']['hosts'].items():
ips.append(self.get_ip_from_opts(opts))
print(' '.join(ips))
def main(argv=None):
if not argv:
argv = sys.argv[1:]
KubesprayInventory(argv, CONFIG_FILE)
return 0
if __name__ == "__main__":
sys.exit(main())

View File

@@ -0,0 +1,3 @@
configparser>=3.3.0
ipaddress
ruamel.yaml>=0.15.88

View File

@@ -0,0 +1,3 @@
[metadata]
name = kubespray-inventory-builder
version = 0.1

View File

@@ -0,0 +1,29 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=[],
pbr=False)

View File

@@ -0,0 +1,3 @@
hacking>=0.10.2
mock>=1.3.0
pytest>=2.8.0

View File

@@ -0,0 +1,595 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from io import StringIO
import unittest
from unittest import mock
from collections import OrderedDict
import sys
path = "./contrib/inventory_builder/"
if path not in sys.path:
sys.path.append(path)
import inventory # noqa
class TestInventoryPrintHostnames(unittest.TestCase):
@mock.patch('ruamel.yaml.YAML.load')
def test_print_hostnames(self, load_mock):
mock_io = mock.mock_open(read_data='')
load_mock.return_value = OrderedDict({'all': {'hosts': {
'node1': {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'},
'node2': {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'}}}})
with mock.patch('builtins.open', mock_io):
with self.assertRaises(SystemExit) as cm:
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
inventory.KubesprayInventory(
changed_hosts=["print_hostnames"],
config_file="file")
self.assertEqual("node1 node2\n", stdout.getvalue())
self.assertEqual(cm.exception.code, 0)
class TestInventory(unittest.TestCase):
@mock.patch('inventory.sys')
def setUp(self, sys_mock):
sys_mock.exit = mock.Mock()
super(TestInventory, self).setUp()
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
self.inv = inventory.KubesprayInventory()
def test_get_ip_from_opts(self):
optstring = {'ansible_host': '10.90.3.2',
'ip': '10.90.3.2',
'access_ip': '10.90.3.2'}
expected = "10.90.3.2"
result = self.inv.get_ip_from_opts(optstring)
self.assertEqual(expected, result)
def test_get_ip_from_opts_invalid(self):
optstring = "notanaddr=value something random!chars:D"
self.assertRaisesRegex(ValueError, "IP parameter not found",
self.inv.get_ip_from_opts, optstring)
def test_ensure_required_groups(self):
groups = ['group1', 'group2']
self.inv.ensure_required_groups(groups)
for group in groups:
self.assertIn(group, self.inv.yaml_config['all']['children'])
def test_get_host_id(self):
hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
'node3.xyz123.aaa']
expected = [99, 1, 1, 1, 3]
for hostname, expected in zip(hostnames, expected):
result = self.inv.get_host_id(hostname)
self.assertEqual(expected, result)
def test_get_host_id_invalid(self):
bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
for hostname in bad_hostnames:
self.assertRaisesRegex(ValueError, "Host name must end in an",
self.inv.get_host_id, hostname)
def test_build_hostnames_add_duplicate(self):
changed_hosts = ['10.90.0.2']
expected = OrderedDict([('node3',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
self.inv.yaml_config['all']['hosts'] = expected
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_add_two(self):
changed_hosts = ['10.90.0.2', '10.90.0.3']
expected = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
self.inv.yaml_config['all']['hosts'] = OrderedDict()
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_add_three(self):
changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
expected = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'}),
('node3', {'ansible_host': '10.90.0.4',
'ip': '10.90.0.4',
'access_ip': '10.90.0.4'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_add_one(self):
changed_hosts = ['10.90.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_delete_first(self):
changed_hosts = ['-10.90.0.2']
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
self.inv.yaml_config['all']['hosts'] = existing_hosts
expected = OrderedDict([
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_delete_by_hostname(self):
changed_hosts = ['-node1']
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
self.inv.yaml_config['all']['hosts'] = existing_hosts
expected = OrderedDict([
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_exists_hostname_positive(self):
hostname = 'node1'
expected = True
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.exists_hostname(existing_hosts, hostname)
self.assertEqual(expected, result)
def test_exists_hostname_negative(self):
hostname = 'node99'
expected = False
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.exists_hostname(existing_hosts, hostname)
self.assertEqual(expected, result)
def test_exists_ip_positive(self):
ip = '10.90.0.2'
expected = True
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.exists_ip(existing_hosts, ip)
self.assertEqual(expected, result)
def test_exists_ip_negative(self):
ip = '10.90.0.200'
expected = False
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
result = self.inv.exists_ip(existing_hosts, ip)
self.assertEqual(expected, result)
def test_delete_host_by_ip_positive(self):
ip = '10.90.0.2'
expected = OrderedDict([
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
self.inv.delete_host_by_ip(existing_hosts, ip)
self.assertEqual(expected, existing_hosts)
def test_delete_host_by_ip_negative(self):
ip = '10.90.0.200'
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'})])
self.assertRaisesRegex(ValueError, "Unable to find host",
self.inv.delete_host_by_ip, existing_hosts, ip)
def test_purge_invalid_hosts(self):
proper_hostnames = ['node1', 'node2']
bad_host = 'doesnotbelong2'
existing_hosts = OrderedDict([
('node1', {'ansible_host': '10.90.0.2',
'ip': '10.90.0.2',
'access_ip': '10.90.0.2'}),
('node2', {'ansible_host': '10.90.0.3',
'ip': '10.90.0.3',
'access_ip': '10.90.0.3'}),
('doesnotbelong2', {'whateveropts=ilike'})])
self.inv.yaml_config['all']['hosts'] = existing_hosts
self.inv.purge_invalid_hosts(proper_hostnames)
self.assertNotIn(
bad_host, self.inv.yaml_config['all']['hosts'].keys())
def test_add_host_to_group(self):
group = 'etcd'
host = 'node1'
opts = {'ip': '10.90.0.2'}
self.inv.add_host_to_group(group, host, opts)
self.assertEqual(
self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
None)
def test_set_kube_control_plane(self):
group = 'kube_control_plane'
host = 'node1'
self.inv.set_kube_control_plane([host])
self.assertIn(
host, self.inv.yaml_config['all']['children'][group]['hosts'])
def test_set_all(self):
hosts = OrderedDict([
('node1', 'opt1'),
('node2', 'opt2')])
self.inv.set_all(hosts)
for host, opt in hosts.items():
self.assertEqual(
self.inv.yaml_config['all']['hosts'].get(host), opt)
def test_set_k8s_cluster(self):
group = 'k8s_cluster'
expected_hosts = ['kube_node', 'kube_control_plane']
self.inv.set_k8s_cluster()
for host in expected_hosts:
self.assertIn(
host,
self.inv.yaml_config['all']['children'][group]['children'])
def test_set_kube_node(self):
group = 'kube_node'
host = 'node1'
self.inv.set_kube_node([host])
self.assertIn(
host, self.inv.yaml_config['all']['children'][group]['hosts'])
def test_set_etcd(self):
group = 'etcd'
host = 'node1'
self.inv.set_etcd([host])
self.assertIn(
host, self.inv.yaml_config['all']['children'][group]['hosts'])
def test_scale_scenario_one(self):
num_nodes = 50
hosts = OrderedDict()
for hostid in range(1, num_nodes+1):
hosts["node" + str(hostid)] = ""
self.inv.set_all(hosts)
self.inv.set_etcd(list(hosts.keys())[0:3])
self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
self.inv.set_kube_node(hosts.keys())
for h in range(3):
self.assertFalse(
list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube_node']['hosts'])
def test_scale_scenario_two(self):
num_nodes = 500
hosts = OrderedDict()
for hostid in range(1, num_nodes+1):
hosts["node" + str(hostid)] = ""
self.inv.set_all(hosts)
self.inv.set_etcd(list(hosts.keys())[0:3])
self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
self.inv.set_kube_node(hosts.keys())
for h in range(5):
self.assertFalse(
list(hosts.keys())[h] in
self.inv.yaml_config['all']['children']['kube_node']['hosts'])
def test_range2ips_range(self):
changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
expected = ['10.90.0.2',
'10.90.0.4',
'10.90.0.5',
'10.90.0.6',
'10.90.0.8']
result = self.inv.range2ips(changed_hosts)
self.assertEqual(expected, result)
def test_range2ips_incorrect_range(self):
host_range = ['10.90.0.4-a.9b.c.e']
self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
self.inv.range2ips, host_range)
def test_build_hostnames_create_with_one_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_create_with_two_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
expected = OrderedDict([
('node1', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node2', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_create_with_three_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2',
'10.90.0.3,192.168.0.3',
'10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node1', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node2', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node3', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_overwrite_one_with_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = OrderedDict([('node5',
{'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_overwrite_three_with_different_ips(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node1',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = OrderedDict([
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts)
self.assertEqual(expected, result)
def test_build_hostnames_different_ips_add_duplicate(self):
changed_hosts = ['10.90.0.2,192.168.0.2']
expected = OrderedDict([('node3',
{'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
existing = expected
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_add_two_different_ips_into_one_existing(self):
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_add_two_different_ips_into_two_existing(self):
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
def test_build_hostnames_add_two_different_ips_into_three_existing(self):
changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'}),
('node6', {'ansible_host': '192.168.0.6',
'ip': '10.90.0.6',
'access_ip': '192.168.0.6'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
# Add two IP addresses into a config that has
# three already defined IP addresses. One of the IP addresses
# is a duplicate.
def test_build_hostnames_add_two_duplicate_one_overlap(self):
changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'}),
('node5', {'ansible_host': '192.168.0.5',
'ip': '10.90.0.5',
'access_ip': '192.168.0.5'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)
# Add two duplicate IP addresses into a config that has
# three already defined IP addresses
def test_build_hostnames_add_two_duplicate_two_overlap(self):
changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
expected = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
existing = OrderedDict([
('node2', {'ansible_host': '192.168.0.2',
'ip': '10.90.0.2',
'access_ip': '192.168.0.2'}),
('node3', {'ansible_host': '192.168.0.3',
'ip': '10.90.0.3',
'access_ip': '192.168.0.3'}),
('node4', {'ansible_host': '192.168.0.4',
'ip': '10.90.0.4',
'access_ip': '192.168.0.4'})])
self.inv.yaml_config['all']['hosts'] = existing
result = self.inv.build_hostnames(changed_hosts, True)
self.assertEqual(expected, result)

View File

@@ -0,0 +1,34 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = pep8
[testenv]
allowlist_externals = py.test
usedevelop = True
deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv =
http_proxy
HTTP_PROXY
https_proxy
HTTPS_PROXY
no_proxy
NO_PROXY
commands = pytest -vv #{posargs:./tests}
[testenv:pep8]
usedevelop = False
allowlist_externals = bash
commands =
bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
[testenv:venv]
commands = {posargs}
[flake8]
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg

View File

@@ -0,0 +1,11 @@
# Kubespray on KVM Virtual Machines hypervisor preparation
A simple playbook to ensure your system has the right settings to enable Kubespray
deployment on VMs.
This playbook does not create Virtual Machines, nor does it run Kubespray itself.
## User creation
If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
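A minimal vars file could look like this (the values mirror the commented defaults shipped alongside this playbook):
```yaml
k8s_deployment_user: kubespray
k8s_deployment_user_pkey_path: /tmp/ssh_rsa
```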

View File

@@ -0,0 +1,2 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa

View File

@@ -0,0 +1,9 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
hosts: localhost
gather_facts: False
become: yes
vars:
bootstrap_os: none
roles:
- { role: kvm-setup }

View File

@@ -0,0 +1,30 @@
---
- name: Install required packages
package:
name: "{{ item }}"
state: present
with_items:
- bind-utils
- ntp
when: ansible_os_family == "RedHat"
- name: Install required packages
apt:
upgrade: yes
update_cache: yes
cache_valid_time: 3600
name: "{{ item }}"
state: present
install_recommends: no
with_items:
- dnsutils
- ntp
when: ansible_os_family == "Debian"
- name: Create deployment user if required
include_tasks: user.yml
when: k8s_deployment_user is defined
- name: Set proper sysctl values
import_tasks: sysctl.yml

View File

@@ -0,0 +1,46 @@
---
- name: Load br_netfilter module
community.general.modprobe:
name: br_netfilter
state: present
register: br_netfilter
- name: Add br_netfilter into /etc/modules
lineinfile:
dest: /etc/modules
state: present
line: 'br_netfilter'
when: br_netfilter is defined and ansible_os_family == 'Debian'
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
copy:
dest: /etc/modules-load.d/kubespray.conf
content: |-
### This file is managed by Ansible
br-netfilter
owner: root
group: root
mode: 0644
when: br_netfilter is defined
- name: Enable net.ipv4.ip_forward in sysctl
ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_file: "{{ sysctl_file_path }}"
state: present
reload: yes
- name: Set bridge-nf-call-{arptables,ip6tables,iptables} to 0
ansible.posix.sysctl:
name: "{{ item }}"
state: present
value: 0
sysctl_file: "{{ sysctl_file_path }}"
reload: yes
with_items:
- net.bridge.bridge-nf-call-arptables
- net.bridge.bridge-nf-call-ip6tables
- net.bridge.bridge-nf-call-iptables
when: br_netfilter is defined

View File

@@ -0,0 +1,47 @@
---
- name: Create user {{ k8s_deployment_user }}
user:
name: "{{ k8s_deployment_user }}"
groups: adm
shell: /bin/bash
- name: Ensure that .ssh exists
file:
path: "/home/{{ k8s_deployment_user }}/.ssh"
state: directory
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
mode: 0700
- name: Configure sudo for deployment user
copy:
content: |
%{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
dest: "/etc/sudoers.d/55-k8s-deployment"
owner: root
group: root
mode: 0644
- name: Write private SSH key
copy:
src: "{{ k8s_deployment_user_pkey_path }}"
dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
mode: 0400
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined
- name: Write public SSH key
shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
> /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
args:
creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
when: k8s_deployment_user_pkey_path is defined
- name: Fix ssh-pub-key permissions
file:
path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
mode: 0600
owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined

View File

@@ -0,0 +1,15 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

View File

@@ -0,0 +1,51 @@
---
- name: Check ansible version
import_playbook: kubernetes_sigs.kubespray.ansible_version
- name: Install mitogen
hosts: localhost
strategy: linear
vars:
mitogen_version: 0.3.2
mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz
ansible_connection: local
tasks:
- name: Create mitogen plugin dir
file:
path: "{{ item }}"
state: directory
mode: 0755
become: false
loop:
- "{{ playbook_dir }}/plugins/mitogen"
- "{{ playbook_dir }}/dist"
- name: Download mitogen release
get_url:
url: "{{ mitogen_url }}"
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
validate_certs: true
mode: 0644
- name: Extract archive
unarchive:
src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
dest: "{{ playbook_dir }}/dist/"
- name: Copy plugin
ansible.posix.synchronize:
src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
dest: "{{ playbook_dir }}/plugins/mitogen"
- name: Add strategy to ansible.cfg
community.general.ini_file:
path: ansible.cfg
mode: 0644
section: "{{ item.section | d('defaults') }}"
option: "{{ item.option }}"
value: "{{ item.value }}"
with_items:
- option: strategy
value: mitogen_linear
- option: strategy_plugins
value: plugins/mitogen/ansible_mitogen/plugins/strategy

View File

@@ -0,0 +1,92 @@
# Deploying a Kubespray Kubernetes Cluster with GlusterFS
You can either deploy using Ansible on its own, supplying your own inventory file, or use Terraform to create the VMs and then provide a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built Ansible inventory, you can skip the **Using Terraform and Ansible** section.
## Using an Ansible inventory
In the same directory as this README you should find a file named `inventory.example`, which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
Change that file to reflect your local setup (adding or removing machines and setting the appropriate IP addresses), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings in `inventory/sample/group_vars/all.yml` make sense for your deployment. Then change to the kubespray root folder and execute (assuming the machines all run Ubuntu):
```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```
If your machines are not using Ubuntu, you need to change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines use one OS and your GlusterFS machines a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
```shell
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
```
## Using Terraform and Ansible
The first step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the desired specification for your cluster. An example with all required variables looks like this:
```ini
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "0"
number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"
# GlusterFS variables
flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
image_gfs = "Ubuntu 16.04"
number_of_gfs_nodes_no_floating_ip = "3"
gfs_volume_size_in_gb = "50"
ssh_user_gfs = "ubuntu"
```
As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your SSH key to the ssh-agent, and set up environment variables for Terraform:
```shell
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
$ echo Setting up Terraform creds && \
export TF_VAR_username=${OS_USERNAME} && \
export TF_VAR_password=${OS_PASSWORD} && \
export TF_VAR_tenant=${OS_TENANT_NAME} && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```
Then, from the kubespray directory (the root of the Git checkout), issue the following Terraform command to create the VMs for the cluster:
```shell
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
This will create both your Kubernetes and Gluster VMs. Make sure that the Ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any Ansible variables that you want to set (for instance, the machine type used for bootstrapping).
Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```
Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:
```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```
If you need to destroy the cluster, you can run:
```shell
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

View File

@@ -0,0 +1,29 @@
---
- name: Bootstrap hosts
hosts: gfs-cluster
gather_facts: false
vars:
ansible_ssh_pipelining: false
roles:
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
hosts: all
gather_facts: true
- name: Install glusterfs server
hosts: gfs-cluster
vars:
ansible_ssh_pipelining: true
roles:
- { role: glusterfs/server }
- name: Install glusterfs clients
hosts: k8s_cluster
roles:
- { role: glusterfs/client }
- name: Configure Kubernetes to use glusterfs
hosts: kube_control_plane[0]
roles:
- { role: kubernetes-pv }

View File

@@ -0,0 +1 @@
../../../inventory/local/group_vars

View File

@@ -0,0 +1,43 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
#
# ## GlusterFS nodes
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
# [kube_control_plane]
# node1
# node2
# [etcd]
# node1
# node2
# node3
# [kube_node]
# node2
# node3
# node4
# node5
# node6
# [k8s_cluster:children]
# kube_node
# kube_control_plane
# [gfs-cluster]
# gfs_node1
# gfs_node2
# gfs_node3
# [network-storage:children]
# gfs-cluster

View File

@@ -0,0 +1 @@
../../../../roles/bootstrap-os

View File

@@ -0,0 +1,50 @@
# Ansible Role: GlusterFS
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-glusterfs.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-glusterfs)
Installs and configures GlusterFS on Linux.
## Requirements
For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`+ (one additional brick port per server in the cluster; on GlusterFS 3.4+ the brick ports start at `49152` instead), as well as TCP/UDP port `111`, must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
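As a sketch, assuming `firewalld` and a three-server cluster, the ports could be opened like this (adjust the brick-port range to your cluster size and GlusterFS version):
```shell
firewall-cmd --permanent --add-port=24007-24008/tcp
# One brick port per server; brick ports start at 49152 on GlusterFS 3.4+.
firewall-cmd --permanent --add-port=49152-49154/tcp
firewall-cmd --permanent --add-port=111/tcp --add-port=111/udp
firewall-cmd --reload
```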
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
```yaml
glusterfs_default_release: ""
```
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
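For example, to pin the backports release mentioned above (illustrative; pick whatever release your distribution actually provides):
```yaml
glusterfs_default_release: "wheezy-backports"
```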
```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
## Dependencies
None.
## Example Playbook
```yaml
- hosts: server
roles:
- geerlingguy.glusterfs
```
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
## License
MIT / BSD
## Author Information
This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).

View File

@@ -0,0 +1,11 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_version: "4.1"
# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster

View File

@@ -0,0 +1,30 @@
---
dependencies: []
galaxy_info:
author: geerlingguy
description: GlusterFS installation for Linux.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: "2.0"
platforms:
- name: EL
versions:
- "6"
- "7"
- name: Ubuntu
versions:
- precise
- trusty
- xenial
- name: Debian
versions:
- wheezy
- jessie
galaxy_tags:
- system
- networking
- cloud
- clustering
- files
- sharing

View File

@@ -0,0 +1,21 @@
---
# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside
# hyperkube and needs to be installed as part of the system.
# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
include_tasks: setup-RedHat.yml
when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined
- name: Setup Debian distros for glusterfs
include_tasks: setup-Debian.yml
when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined
- name: Ensure Gluster mount directories exist.
file:
path: "{{ item }}"
state: directory
mode: 0775
with_items:
- "{{ gluster_mount_dir }}"
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined

View File

@@ -0,0 +1,24 @@
---
- name: Add PPA for GlusterFS.
apt_repository:
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
state: present
update_cache: yes
register: glusterfs_ppa_added
when: glusterfs_ppa_use
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
apt:
name: "{{ item }}"
state: absent
with_items:
- glusterfs-client
when: glusterfs_ppa_added.changed
- name: Ensure GlusterFS client is installed.
apt:
name: "{{ item }}"
state: present
default_release: "{{ glusterfs_default_release }}"
with_items:
- glusterfs-client

View File

@@ -0,0 +1,14 @@
---
- name: Install Prerequisites
package:
name: "{{ item }}"
state: present
with_items:
- "centos-release-gluster{{ glusterfs_default_release }}"
- name: Install Packages
package:
name: "{{ item }}"
state: present
with_items:
- glusterfs-client

View File

@@ -0,0 +1,13 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.12"
# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory.
disk_volume_device_1: /dev/vdb

View File

@@ -0,0 +1,30 @@
---
dependencies: []
galaxy_info:
author: geerlingguy
description: GlusterFS installation for Linux.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: "2.0"
platforms:
- name: EL
versions:
- "6"
- "7"
- name: Ubuntu
versions:
- precise
- trusty
- xenial
- name: Debian
versions:
- wheezy
- jessie
galaxy_tags:
- system
- networking
- cloud
- clustering
- files
- sharing

View File

@@ -0,0 +1,113 @@
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
include_vars: "{{ ansible_os_family }}.yml"
# Install xfs package
- name: Install xfs Debian
apt:
name: xfsprogs
state: present
when: ansible_os_family == "Debian"
- name: Install xfs RedHat
package:
name: xfsprogs
state: present
when: ansible_os_family == "RedHat"
# Format external volumes in xfs
- name: Format volumes in xfs
community.general.filesystem:
fstype: xfs
dev: "{{ disk_volume_device_1 }}"
# Mount external volumes
- name: Mounting new xfs filesystem
ansible.posix.mount:
name: "{{ gluster_volume_node_mount_dir }}"
src: "{{ disk_volume_device_1 }}"
fstype: xfs
state: mounted
# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
include_tasks: setup-RedHat.yml
when: ansible_os_family == 'RedHat'
- name: Setup Debian distros for glusterfs
include_tasks: setup-Debian.yml
when: ansible_os_family == 'Debian'
- name: Ensure GlusterFS is started and enabled at boot.
service:
name: "{{ glusterfs_daemon }}"
state: started
enabled: yes
- name: Ensure Gluster brick and mount directories exist.
file:
path: "{{ item }}"
state: directory
mode: 0775
with_items:
- "{{ gluster_brick_dir }}"
- "{{ gluster_mount_dir }}"
- name: Configure Gluster volume with replicas
gluster.gluster.gluster_volume:
state: present
name: "{{ gluster_brick_name }}"
brick: "{{ gluster_brick_dir }}"
replicas: "{{ groups['gfs-cluster'] | length }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: yes
run_once: true
when: groups['gfs-cluster'] | length > 1
- name: Configure Gluster volume without replicas
gluster.gluster.gluster_volume:
state: present
name: "{{ gluster_brick_name }}"
brick: "{{ gluster_brick_dir }}"
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
host: "{{ inventory_hostname }}"
force: yes
run_once: true
when: groups['gfs-cluster'] | length <= 1
- name: Mount glusterfs to retrieve disk size
ansible.posix.mount:
name: "{{ gluster_mount_dir }}"
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
fstype: glusterfs
opts: "defaults,_netdev"
state: mounted
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
- name: Get Gluster disk size
setup:
filter: ansible_mounts
register: mounts_data
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
- name: Set Gluster disk size to variable
set_fact:
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
- name: Create file on GlusterFS
template:
dest: "{{ gluster_mount_dir }}/.test-file.txt"
src: test-file.txt
mode: 0644
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
- name: Unmount glusterfs
ansible.posix.mount:
name: "{{ gluster_mount_dir }}"
fstype: glusterfs
src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
state: unmounted
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

View File

@@ -0,0 +1,26 @@
---
- name: Add PPA for GlusterFS.
apt_repository:
repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
state: present
update_cache: yes
register: glusterfs_ppa_added
when: glusterfs_ppa_use
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
apt:
name: "{{ item }}"
state: absent
with_items:
- glusterfs-server
- glusterfs-client
when: glusterfs_ppa_added.changed
- name: Ensure GlusterFS is installed.
apt:
name: "{{ item }}"
state: present
default_release: "{{ glusterfs_default_release }}"
with_items:
- glusterfs-server
- glusterfs-client

View File

@@ -0,0 +1,15 @@
---
- name: Install Prerequisites
package:
name: "{{ item }}"
state: present
with_items:
- "centos-release-gluster{{ glusterfs_default_release }}"
- name: Install Packages
package:
name: "{{ item }}"
state: present
with_items:
- glusterfs-server
- glusterfs-client

View File

@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterd

View File

@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterd

View File

@@ -0,0 +1,23 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
template:
src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
mode: 0644
with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
register: gluster_pv
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
- name: Kubernetes Apps | Set GlusterFS endpoint and PV
kube:
name: glusterfs
namespace: default
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
state: "{{ item.changed | ternary('latest', 'present') }}"
with_items: "{{ gluster_pv.results }}"
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined

View File

@@ -0,0 +1,12 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "glusterfs"
},
"spec": {
"ports": [
{"port": 1}
]
}
}

View File

@@ -0,0 +1,23 @@
{
"kind": "Endpoints",
"apiVersion": "v1",
"metadata": {
"name": "glusterfs"
},
"subsets": [
{% for host in groups['gfs-cluster'] %}
{
"addresses": [
{
"ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
}
],
"ports": [
{
"port": 1
}
]
}{%- if not loop.last %}, {% endif -%}
{% endfor %}
]
}

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: glusterfs
spec:
capacity:
storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
accessModes:
- ReadWriteMany
glusterfs:
endpoints: glusterfs
path: gluster
readOnly: false
persistentVolumeReclaimPolicy: Retain

View File

@@ -0,0 +1,3 @@
---
dependencies:
- {role: kubernetes-pv/ansible, tags: apps}

View File

@@ -0,0 +1,27 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes
This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys Heketi/GlusterFS into Kubernetes and sets up a StorageClass.
## Important notice
> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)
## Client Setup
Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
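As a quick smoke test once the server is reachable, something like the following should list the cluster; the server URL here is an assumption, and the admin key comes from your inventory (`heketi_admin_key`):
```shell
heketi-cli --server http://localhost:8080 --user admin --secret "<heketi_admin_key>" cluster list
```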
## Install
Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
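For example (assuming the sample file lives in the heketi contrib directory):
```shell
cp contrib/network-storage/heketi/inventory.yml.sample inventory/sample/k8s_heketi_inventory.yml
```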
```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```
## Tear down
```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```
Add `--extra-vars "heketi_remove_lvm=true"` to the command above to remove LVM packages from the system.
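Combined, the full tear-down command then reads:
```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml \
  --extra-vars "heketi_remove_lvm=true" contrib/network-storage/heketi/heketi-tear-down.yml
```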

View File

@@ -0,0 +1,11 @@
---
- name: Tear down heketi
hosts: kube_control_plane[0]
roles:
- { role: tear-down }
- name: Teardown disks in heketi
hosts: heketi-node
become: yes
roles:
- { role: tear-down-disks }

View File

@@ -0,0 +1,12 @@
---
- name: Prepare heketi install
hosts: heketi-node
roles:
- { role: prepare }
- name: Provision heketi
hosts: kube_control_plane[0]
tags:
- "provision"
roles:
- { role: provision }

View File

@@ -0,0 +1,33 @@
all:
vars:
heketi_admin_key: "11elfeinhundertundelf"
heketi_user_key: "!!einseinseins"
glusterfs_daemonset:
readiness_probe:
timeout_seconds: 3
initial_delay_seconds: 3
liveness_probe:
timeout_seconds: 3
initial_delay_seconds: 10
children:
k8s_cluster:
vars:
kubelet_fail_swap_on: false
children:
kube_control_plane:
hosts:
node1:
etcd:
hosts:
node2:
kube_node:
hosts: &kube_nodes
node1:
node2:
node3:
node4:
heketi-node:
vars:
disk_volume_device_1: "/dev/vdb"
hosts:
<<: *kube_nodes

View File

@@ -0,0 +1 @@
jmespath

View File

@@ -0,0 +1,24 @@
---
- name: "Load lvm kernel modules"
become: true
with_items:
- "dm_snapshot"
- "dm_mirror"
- "dm_thin_pool"
community.general.modprobe:
name: "{{ item }}"
state: "present"
- name: "Install glusterfs mount utils (RedHat)"
become: true
package:
name: "glusterfs-fuse"
state: "present"
when: "ansible_os_family == 'RedHat'"
- name: "Install glusterfs mount utils (Debian)"
become: true
apt:
name: "glusterfs-client"
state: "present"
when: "ansible_os_family == 'Debian'"

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,3 @@
---
- name: "Stop port forwarding"
command: "killall "

View File

@@ -0,0 +1,64 @@
---
# Bootstrap heketi
- name: "Get state of heketi service, deployment and pods."
register: "initial_heketi_state"
changed_when: false
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
- name: "Bootstrap heketi."
when:
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
- "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
include_tasks: "bootstrap/deploy.yml"
# Prepare heketi topology
- name: "Get heketi initial pod state."
register: "initial_heketi_pod"
command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
changed_when: false
- name: "Ensure heketi bootstrap pod is up."
assert:
that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
- name: Store the initial heketi pod name
set_fact:
initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
- name: "Test heketi topology."
changed_when: false
register: "heketi_topology"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Load heketi topology."
when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
include_tasks: "bootstrap/topology.yml"
# Provision heketi database volume
- name: "Prepare heketi volumes."
include_tasks: "bootstrap/volumes.yml"
# Remove bootstrap heketi
- name: "Tear down bootstrap."
include_tasks: "bootstrap/tear-down.yml"
# Prepare heketi storage
- name: "Test heketi storage."
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
changed_when: false
register: "heketi_storage_state"
# ensure endpoints actually exist before trying to move database data to it
- name: "Create heketi storage."
include_tasks: "bootstrap/storage.yml"
vars:
secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
when:
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"

View File

@@ -0,0 +1,27 @@
---
- name: "Kubernetes Apps | Lay Down Heketi Bootstrap"
become: true
template:
src: "heketi-bootstrap.json.j2"
dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
mode: 0640
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
kube:
name: "GlusterFS"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Wait for heketi bootstrap to complete."
changed_when: false
register: "initial_heketi_state"
vars:
initial_heketi_state: { stdout: "{}" }
pods_query: "items[?kind=='Pod'].status.conditions | [0][?type=='Ready'].status | [0]"
deployments_query: "items[?kind=='Deployment'].status.conditions | [0][?type=='Available'].status | [0]"
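    # the task-level default ({ stdout: "{}" }) guards the json_query in
    # `until` against an empty or not-yet-registered result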
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
until:
- "initial_heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
- "initial_heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
retries: 60
delay: 5

View File

@@ -0,0 +1,33 @@
---
- name: "Test heketi storage."
command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
changed_when: false
register: "heketi_storage_state"
- name: "Create heketi storage."
kube:
name: "GlusterFS"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
state: "present"
vars:
secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']"
endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']"
service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
when:
- "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
- "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
- "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
register: "heketi_storage_result"
- name: "Get state of heketi database copy job."
command: "{{ bin_dir }}/kubectl get jobs --output=json"
changed_when: false
register: "heketi_storage_state"
vars:
heketi_storage_state: { stdout: "{}" }
job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]"
until:
- "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 1"
retries: 60
delay: 5

View File

@@ -0,0 +1,14 @@
---
- name: "Get existing Heketi deploy resources."
command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json"
register: "heketi_resources"
changed_when: false
- name: "Delete bootstrap Heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
when: "heketi_resources.stdout | from_json | json_query('items[*]') | length > 0"
- name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout | from_json | json_query('items[*]') | length == 0"
retries: 60
delay: 5

View File

@@ -0,0 +1,27 @@
---
- name: "Get heketi topology."
changed_when: false
register: "heketi_topology"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
- name: "Render heketi topology template."
become: true
vars: { nodes: "{{ groups['heketi-node'] }}" }
register: "render"
template:
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
mode: 0644
- name: "Copy topology configuration into container."
changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
- name: "Load heketi topology." # noqa no-handler
when: "render.changed"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
register: "load_heketi"
- name: "Get heketi topology."
changed_when: false
register: "heketi_topology"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
until: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\") | flatten | length == groups['heketi-node'] | length"
retries: 60
delay: 5

View File

@@ -0,0 +1,41 @@
---
- name: "Get heketi volume ids."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
changed_when: false
register: "heketi_volumes"
- name: "Get heketi volumes."
changed_when: false
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
loop_control: { loop_var: "volume_id" }
register: "volumes_information"
- name: "Test heketi database volume."
set_fact: { heketi_database_volume_exists: true }
with_items: "{{ volumes_information.results }}"
loop_control: { loop_var: "volume_information" }
vars: { volume: "{{ volume_information.stdout | from_json }}" }
when: "volume.name == 'heketidbstorage'"
- name: "Provision database volume."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
when: "heketi_database_volume_exists is undefined"
- name: "Copy configuration from pod."
become: true
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json"
changed_when: false
register: "heketi_volumes"
- name: "Get heketi volumes."
changed_when: false
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json"
with_items: "{{ heketi_volumes.stdout | from_json | json_query(\"volumes[*]\") }}"
loop_control: { loop_var: "volume_id" }
register: "volumes_information"
- name: "Test heketi database volume."
set_fact: { heketi_database_volume_created: true }
with_items: "{{ volumes_information.results }}"
loop_control: { loop_var: "volume_information" }
vars: { volume: "{{ volume_information.stdout | from_json }}" }
when: "volume.name == 'heketidbstorage'"
- name: "Ensure heketi database volume exists."
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }

View File

@@ -0,0 +1,4 @@
---
- name: "Clean up left over jobs."
command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
changed_when: false

View File

@@ -0,0 +1,44 @@
---
- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
template:
src: "glusterfs-daemonset.json.j2"
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
mode: 0644
become: true
register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
kube:
name: "GlusterFS"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Kubernetes Apps | Label GlusterFS nodes"
include_tasks: "glusterfs/label.yml"
with_items: "{{ groups['heketi-node'] }}"
loop_control:
loop_var: "node"
- name: "Kubernetes Apps | Wait for daemonset to become available."
register: "daemonset_state"
command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
changed_when: false
vars:
daemonset_state: { stdout: "{}" }
ready: "{{ daemonset_state.stdout | from_json | json_query(\"status.numberReady\") }}"
desired: "{{ daemonset_state.stdout | from_json | json_query(\"status.desiredNumberScheduled\") }}"
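    # the rollout is done when every desired pod is ready; requiring
    # desired > 0 keeps the empty default ("{}") from passing immediately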
until: "ready | int >= 3"
retries: 60
delay: 5
- name: "Kubernetes Apps | Lay Down Heketi Service Account"
template:
src: "heketi-service-account.json.j2"
dest: "{{ kube_config_dir }}/heketi-service-account.json"
mode: 0644
become: true
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account"
kube:
name: "GlusterFS"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/heketi-service-account.json"
state: "{{ rendering.changed | ternary('latest', 'present') }}"

View File

@@ -0,0 +1,19 @@
---
- name: Get storage nodes
register: "label_present"
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
changed_when: false
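# apply the label only when the selector above found nothing, keeping the task idempotent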
- name: "Assign storage label"
when: "label_present.stdout_lines | length == 0"
command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
- name: Get storage nodes again
register: "label_present"
command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
changed_when: false
- name: Ensure the label has been set
assert:
that: "label_present | length > 0"
msg: "Node {{ node }} has not been assigned with label storagenode=glusterfs."

View File

@@ -0,0 +1,34 @@
---
- name: "Kubernetes Apps | Lay Down Heketi"
become: true
template:
src: "heketi-deployment.json.j2"
dest: "{{ kube_config_dir }}/heketi-deployment.json"
mode: 0644
register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi"
kube:
name: "GlusterFS"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/heketi-deployment.json"
state: "{{ rendering.changed | ternary('latest', 'present') }}"
- name: "Ensure heketi is up and running."
changed_when: false
register: "heketi_state"
vars:
heketi_state:
stdout: "{}"
pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
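    # JMESPath: extract the Ready / Available condition status ('True' or
    # 'False') from the first matching pod and deployment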
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
until:
- "heketi_state.stdout | from_json | json_query(pods_query) == 'True'"
- "heketi_state.stdout | from_json | json_query(deployments_query) == 'True'"
retries: 60
delay: 5
- name: Set the Heketi pod name
set_fact:
heketi_pod_name: "{{ heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"

Some files were not shown because too many files have changed in this diff