This commit is contained in:
havelight-ee
2023-05-30 14:44:26 +09:00
parent 9a3174deef
commit 4c32a7239d
2598 changed files with 164595 additions and 487 deletions

View File

@@ -0,0 +1,6 @@
---
# How the CCM manages OCI security list rules for load balancer services.
# Validated in credentials-check.yml; allowed values: All, Frontend, None.
oci_security_list_management: All
# When true, authenticate to OCI via instance principals instead of an
# API key (oci_private_key / oci_user_id / oci_user_fingerprint etc.).
oci_use_instance_principals: false
# Image tag of the oci-cloud-controller-manager to deploy.
# NOTE(review): also gates the oci_subnet2_id requirement (versions < 0.7.0
# require two load balancer subnets — see credentials-check.yml).
oci_cloud_controller_version: 0.7.0
# Registry/repository the controller image is pulled from.
oci_cloud_controller_pull_source: iad.ocir.io/oracle/cloud-provider-oci

View File

@@ -0,0 +1,67 @@
---
# Validate the variables required by the OCI cloud controller manager before
# any configuration is generated. API-key credentials are only required when
# instance principals are not in use; cluster/network identifiers are always
# required.
- name: "OCI Cloud Controller | Credentials Check | oci_private_key"
  fail:
    msg: "oci_private_key is missing"
  when:
    - not oci_use_instance_principals
    - oci_private_key is not defined or not oci_private_key

- name: "OCI Cloud Controller | Credentials Check | oci_region_id"
  fail:
    msg: "oci_region_id is missing"
  when:
    - not oci_use_instance_principals
    - oci_region_id is not defined or not oci_region_id

- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id"
  fail:
    msg: "oci_tenancy_id is missing"
  when:
    - not oci_use_instance_principals
    - oci_tenancy_id is not defined or not oci_tenancy_id

- name: "OCI Cloud Controller | Credentials Check | oci_user_id"
  fail:
    msg: "oci_user_id is missing"
  when:
    - not oci_use_instance_principals
    - oci_user_id is not defined or not oci_user_id

- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint"
  fail:
    msg: "oci_user_fingerprint is missing"
  when:
    - not oci_use_instance_principals
    - oci_user_fingerprint is not defined or not oci_user_fingerprint

- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id"
  fail:
    msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides"
  when:
    - oci_compartment_id is not defined or not oci_compartment_id

- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id"
  fail:
    msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides"
  when:
    - oci_vnc_id is not defined or not oci_vnc_id

- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id"
  fail:
    # was "missingg" — typo fixed
    msg: "oci_subnet1_id is missing. This is the first subnet to which loadbalancers will be added"
  when:
    - oci_subnet1_id is not defined or not oci_subnet1_id

# Controller versions before 0.7.0 require two subnets for LB high availability.
- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id"
  fail:
    msg: "oci_subnet2_id is missing. Two subnets are required for load balancer high availability"
  when:
    # `version` replaces the deprecated `version_compare` test (renamed in Ansible 2.5)
    - oci_cloud_controller_version is version('0.7.0', '<')
    - oci_subnet2_id is not defined or not oci_subnet2_id

- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management"
  fail:
    msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)."
  when:
    - oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]

View File

@@ -0,0 +1,34 @@
---
# Deploy the OCI cloud controller manager. Everything that touches the
# generated configuration runs only on the first control plane node; the
# manifests are then applied from that same node.
- include_tasks: credentials-check.yml

- name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
  template:
    src: controller-manager-config.yml.j2
    dest: "{{ kube_config_dir }}/controller-manager-config.yml"
    mode: "0644"
  when: inventory_hostname == groups['kube_control_plane'][0]

# Guarded like the other tasks: the file only exists on the first control
# plane node, so slurping it anywhere else would fail.
- name: "OCI Cloud Controller | Slurp Configuration"
  slurp:
    src: "{{ kube_config_dir }}/controller-manager-config.yml"
  register: controller_manager_config
  when: inventory_hostname == groups['kube_control_plane'][0]

# slurp already returns base64-encoded content, so it can be embedded
# directly into the Secret manifest.
- name: "OCI Cloud Controller | Encode Configuration"
  set_fact:
    controller_manager_config_base64: "{{ controller_manager_config.content }}"
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: "OCI Cloud Controller | Generate Manifests"
  template:
    src: oci-cloud-provider.yml.j2
    dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
    mode: "0644"
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: "OCI Cloud Controller | Apply Manifests"
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
    state: latest
  when: inventory_hostname == groups['kube_control_plane'][0]

View File

@@ -0,0 +1,90 @@
{% macro private_key() %}{{ oci_private_key }}{% endmacro %}
{% if oci_use_instance_principals %}
# (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm).
# Ensure you have setup the following OCI policies and your kubernetes nodes are running within them
# allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name]
useInstancePrincipals: true
{% else %}
useInstancePrincipals: false
{% endif %}

auth:
{% if oci_use_instance_principals %}
  # This key is put here too for backwards compatibility
  useInstancePrincipals: true
{% else %}
  useInstancePrincipals: false

  region: {{ oci_region_id }}
  tenancy: {{ oci_tenancy_id }}
  user: {{ oci_user_id }}
  # The PEM key is multi-line; re-indent every line to keep the YAML block
  # scalar valid (a bare substitution would break after the first line).
  key: |
    {{ private_key() | indent(4) }}
{% if oci_private_key_passphrase is defined %}
  passphrase: {{ oci_private_key_passphrase }}
{% endif %}
  fingerprint: {{ oci_user_fingerprint }}
{% endif %}

# compartment configures Compartment within which the cluster resides.
compartment: {{ oci_compartment_id }}

# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides.
vcn: {{ oci_vnc_id }}

loadBalancer:
  # subnet1 configures one of two subnets to which load balancers will be added.
  # OCI load balancers require two subnets to ensure high availability.
  subnet1: {{ oci_subnet1_id }}
{% if oci_subnet2_id is defined %}
  # subnet2 configures the second of two subnets to which load balancers will be
  # added. OCI load balancers require two subnets to ensure high availability.
  subnet2: {{ oci_subnet2_id }}
{% endif %}

  # SecurityListManagementMode configures how security lists are managed by the CCM.
  # "All" (default): Manage all required security list rules for load balancer services.
  # "Frontend": Manage only security list rules for ingress to the load
  #             balancer. Requires that the user has setup a rule that
  #             allows inbound traffic to the appropriate ports for kube
  #             proxy health port, node port ranges, and health check port ranges.
  #             E.g. 10.82.0.0/16 30000-32000.
  # "None": Disables all security list management. Requires that the
  #         user has setup a rule that allows inbound traffic to the
  #         appropriate ports for kube proxy health port, node port
  #         ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000.
  #         Additionally requires the user to mange rules to allow
  #         inbound traffic to load balancers.
  securityListManagementMode: {{ oci_security_list_management }}

{% if oci_security_lists is defined and oci_security_lists|length > 0 %}
  # Optional specification of which security lists to modify per subnet. This does not apply if security list management is off.
  securityLists:
{% for subnet_ocid, list_ocid in oci_security_lists.items() %}
    {{ subnet_ocid }}: {{ list_ocid }}
{% endfor %}
{% endif %}

{% if oci_rate_limit is defined and oci_rate_limit|length > 0 %}
# Optional rate limit controls for accessing OCI API
rateLimiter:
{% if oci_rate_limit.rate_limit_qps_read %}
  rateLimitQPSRead: {{ oci_rate_limit.rate_limit_qps_read }}
{% endif %}
{% if oci_rate_limit.rate_limit_qps_write %}
  rateLimitQPSWrite: {{ oci_rate_limit.rate_limit_qps_write }}
{% endif %}
{% if oci_rate_limit.rate_limit_bucket_read %}
  rateLimitBucketRead: {{ oci_rate_limit.rate_limit_bucket_read }}
{% endif %}
{% if oci_rate_limit.rate_limit_bucket_write %}
  rateLimitBucketWrite: {{ oci_rate_limit.rate_limit_bucket_write }}
{% endif %}
{% endif %}

View File

@@ -0,0 +1,73 @@
# Secret holding the rendered cloud-provider configuration; the value is
# already base64-encoded (produced by slurp in tasks/main.yml).
apiVersion: v1
data:
  cloud-provider.yaml: {{ controller_manager_config_base64 }}
kind: Secret
metadata:
  name: oci-cloud-controller-manager
  namespace: kube-system
type: Opaque
---
# DaemonSet running the OCI cloud controller manager on control plane nodes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: oci-cloud-controller-manager
  namespace: kube-system
  labels:
    k8s-app: oci-cloud-controller-manager
spec:
  selector:
    matchLabels:
      component: oci-cloud-controller-manager
      tier: control-plane
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        component: oci-cloud-controller-manager
        tier: control-plane
    spec:
{% if oci_cloud_controller_pull_secret is defined %}
      imagePullSecrets:
        - name: {{ oci_cloud_controller_pull_secret }}
{% endif %}
      # NOTE(review): the cloud-controller-manager ServiceAccount is not
      # defined in this template — confirm it is created elsewhere.
      serviceAccountName: cloud-controller-manager
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
      tolerations:
        # Allow scheduling before the node is initialized by a cloud provider,
        # and onto tainted control plane / master nodes.
        - key: node.cloudprovider.kubernetes.io/uninitialized
          value: "true"
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      volumes:
        - name: cfg
          secret:
            secretName: oci-cloud-controller-manager
        - name: kubernetes
          hostPath:
            path: /etc/kubernetes
      containers:
        - name: oci-cloud-controller-manager
          # Quoted so the templated registry:tag scalar cannot be misparsed.
          image: "{{ oci_cloud_controller_pull_source }}:{{ oci_cloud_controller_version }}"
          command: ["/usr/local/bin/oci-cloud-controller-manager"]
          args:
            - --cloud-config=/etc/oci/cloud-provider.yaml
            - --cloud-provider=oci
            - --leader-elect-resource-lock=configmaps
            - -v=2
          volumeMounts:
            - name: cfg
              mountPath: /etc/oci
              readOnly: true
            - name: kubernetes
              mountPath: /etc/kubernetes
              readOnly: true