Provision k8s cluster with Kubespray

Paul-Henri Froidmont 2019-04-04 02:25:38 +02:00
parent 2f9be424d3
commit d1db285cf0
13 changed files with 108 additions and 239 deletions

.gitmodules (new file)

@@ -0,0 +1,3 @@
[submodule "kubespray"]
path = kubespray
url = ssh://git@gitlab.banditlair.com:2224/phfroidmont/kubespray.git
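Kubespray is vendored as a submodule pinned to a specific revision, so a fresh clone has to fetch it before the playbooks can use kubespray's modules and roles. A minimal sketch using standard git commands:

# after cloning this repository, fetch the pinned kubespray checkout
git submodule update --init --recursive kubespray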


@@ -6,8 +6,13 @@ host_key_checking = False
nocows = 1
remote_user = root
retry_files_enabled = False
library = kubespray/library/
roles_path = kubespray/roles/
[ssh_connection]
control_path = /tmp/ansible-ssh-%%h-%%p-%%r
pipelining = True
ssh_args = -C -o ControlMaster=auto -o ControlPersist=5m -o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
[inventory]
enable_plugins = host_list, scaleway, ini, script, yaml
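The config above points library and roles_path at the kubespray submodule and enables the scaleway inventory plugin. A quick, non-destructive way to confirm the settings are picked up is ansible-config; a sketch, assuming it is run from the directory containing this ansible.cfg:

# list only the options that differ from Ansible's defaults
ansible-config dump --only-changed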


@@ -0,0 +1,2 @@
kube_network_plugin: flannel
bin_dir: /usr/local/bin
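kube_network_plugin and bin_dir here are ordinary kubespray variables, so they can also be overridden for a single run instead of editing this vars file; a sketch (calico is only an example alternative value):

# try a different CNI for one run without touching the group vars
ansible-playbook k8s.yml -e kube_network_plugin=calico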


@@ -0,0 +1,10 @@
[kube-master]
[etcd]
[kube-node]
[k8s]
[k8s-cluster:children]
k8s
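The stub groups above exist so that the group names kubespray expects (kube-master, etcd, kube-node) are always defined, while k8s-cluster:children pulls in every host the dynamic inventory places in the k8s group via its Scaleway tag. A sketch for inspecting the merged result, assuming the inventory sources are wired up through ansible.cfg or passed with -i:

# show the merged static + dynamic group tree
ansible-inventory --graph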


@@ -0,0 +1,13 @@
plugin: scaleway
hostnames:
  - hostname
regions:
  - par1
  - ams1
tags:
  - k8s
  - kube-master
  - etcd
  - kube-node
variables:
  ansible_host: public_ip.address
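This scaleway inventory plugin config builds groups from the listed tags across par1 and ams1 and uses each server's public address for ansible_host. The plugin authenticates with an API token from the environment; SCW_TOKEN and the ~/.ssh/scw-token file (previously referenced by the scw.ini removed below) are assumptions to verify against the installed plugin's documentation:

# export the API token and dump what the plugin discovers
export SCW_TOKEN=$(cat ~/.ssh/scw-token)
ansible-inventory --list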


@@ -1,4 +0,0 @@
[credentials]
token_file = ~/.ssh/scw-token
[config]
environment = staging


@@ -1,4 +0,0 @@
#!/bin/bash
cd "$(dirname $0)"
exec ../../scripts/scw_inventory.py

k8s.yml

@@ -1,24 +1,27 @@
---
- hosts: k8s_proxy:k8s_masters:k8s_workers
  roles:
    - role: proxy
      tags: proxy
    - role: docker
      tags: docker
- hosts: k8s_masters
  gather_facts: false
  roles:
    - role: etcd
      tags: etcd
- hosts: k8s_proxy:k8s_masters:k8s_workers
  gather_facts: false
  roles:
    - role: kubernetes
      tags: kubernetes
- hosts: k8s_masters:k8s_proxy
  gather_facts: false
  roles:
    - role: ingress
      tags: ingress
    - role: kubernetes-dashboard
      tags: dashboard
- name: Include kubespray tasks
  import_playbook: kubespray/cluster.yml
# - hosts: k8s_proxy:k8s_masters:k8s_workers
#   roles:
#     - role: proxy
#       tags: proxy
#     - role: docker
#       tags: docker
# - hosts: k8s_masters
#   gather_facts: false
#   roles:
#     - role: etcd
#       tags: etcd
# - hosts: k8s_proxy:k8s_masters:k8s_workers
#   gather_facts: false
#   roles:
#     - role: kubernetes
#       tags: kubernetes
# - hosts: k8s_masters:k8s_proxy
#   gather_facts: false
#   roles:
#     - role: ingress
#       tags: ingress
#     - role: kubernetes-dashboard
#       tags: dashboard
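The playbook is now a thin wrapper that imports kubespray's cluster.yml, with the old hand-rolled plays kept only as comments. A sketch of a full run, assuming the dynamic inventory is configured and root SSH access as set in ansible.cfg (remote_user = root):

# provision the whole cluster through the imported kubespray playbook
ansible-playbook k8s.yml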

kubespray (new submodule)

@@ -0,0 +1 @@
Subproject commit a8dd69cf1777996873570448110239adba605a05


@@ -1,110 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generate an inventory of servers from scaleway which is
suitable for use as an ansible dynamic inventory
Right now, only the group 'cci-customer' is exported
"""
import configparser
import json
import os
from typing import Dict, Any

from scaleway.apis import ComputeAPI


class SCWInventory(object):
    """
    The inventory class which calls out to scaleway and digests
    the returned data, making it usable by ansible as an inventory
    """
    response: Dict[str, Any]

    def __init__(self):
        self.inventory = None
        self.auth_token = None
        self.environment = None
        self.response = {
            '_meta': {
                'hostvars': {
                }
            }
        }

    def parse_config(self, creds_file='scw.ini'):
        """
        Parse the ini file to get the auth token
        """
        config = configparser.ConfigParser()
        config.read(creds_file)
        with open(os.path.expanduser(config['credentials']['token_file']), 'r') as content_file:
            self.auth_token = content_file.read().replace('\n', '')
        self.environment = config['config']['environment']

    def get_servers(self):
        """
        Query scaleway api and pull down a list of servers
        """
        self.parse_config()
        api_par1 = ComputeAPI(auth_token=self.auth_token, region='par1')
        api_ams1 = ComputeAPI(auth_token=self.auth_token, region='ams1')
        result_par1 = api_par1.query().servers.get()
        result_ams1 = api_ams1.query().servers.get()
        self.inventory = [
            [i['name'], i['public_ip'], i['tags'], i['private_ip']] for i in
            result_par1['servers'] + result_ams1['servers']
        ]
        for host, ip_info, tags, private_ip in self.inventory:
            host_vars = {
                'private_ip': private_ip,
                'ansible_python_interpreter': '/usr/bin/python3'
            }
            if ip_info:
                host_vars['ansible_host'] = ip_info['address']
                host_vars['public_ip'] = ip_info['address']
            else:
                host_vars['ansible_host'] = private_ip
            self.response['_meta']['hostvars'][host] = host_vars
            if tags:
                for tag in tags:
                    self._add_to_response(
                        tag,
                        host
                    )
        for host, variables in self.response['_meta']['hostvars'].items():
            if host != 'proxy1':
                variables['ansible_ssh_common_args'] = '-o ProxyCommand="ssh -W %h:%p -q root@' + \
                    self.response['_meta']['hostvars']['proxy1']['public_ip'] \
                    + ' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"'

    def _add_to_response(self, group, hostname):
        """
        Add a host to a group within the response
        """
        if group not in self.response:
            self.response[group] = list()
        self.response[group].append(hostname)

    def print_inventory(self):
        """
        Simply display the collected inventory
        """
        print(json.dumps(self.response))


def main():
    """
    Run the program starting here
    """
    inventory = SCWInventory()
    inventory.get_servers()
    inventory.print_inventory()


if __name__ == '__main__':
    main()

terraform/main.tf (new file)

@@ -0,0 +1,41 @@
provider "scaleway" {
  region = "${var.region}"
}
data "scaleway_image" "ubuntu" {
  architecture = "${var.architecture}"
  name = "${var.image}"
}
//resource "scaleway_ip" "public_ip" {
//  count = 1
//}
resource "scaleway_server" "node" {
  count = "${var.node_instance_count}"
  name = "node${count.index+1}"
  image = "${data.scaleway_image.ubuntu.id}"
  type = "${var.node_instance_type}"
  state = "running"
  dynamic_ip_required = true
  tags = ["k8s", "kube-node"]
}
resource "scaleway_server" "master" {
  count = "${var.master_instance_count}"
  name = "master${count.index+1}"
  image = "${data.scaleway_image.ubuntu.id}"
  type = "${var.master_instance_type}"
  state = "running"
  dynamic_ip_required = true
  tags = ["k8s", "kube-master", "etcd"]
}
output "node_private_ips" {
  value = ["${scaleway_server.node.*.private_ip}"]
}
output "master_private_ips" {
  value = ["${scaleway_server.master.*.private_ip}"]
}
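main.tf creates the masters and nodes whose tags (k8s, kube-master, kube-node, etcd) later become the Ansible inventory groups. A sketch of the usual Terraform cycle; provider credentials are assumed to come from the environment (for the Scaleway provider of this era, something like SCALEWAY_ORGANIZATION and SCALEWAY_TOKEN):

# plan and create the Scaleway servers declared above
terraform init
terraform plan
terraform apply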


@@ -1,79 +0,0 @@
provider "scaleway" {
  region = "${var.region}"
}
data "scaleway_image" "ubuntu" {
  architecture = "${var.architecture}"
  name = "${var.image}"
}
data "scaleway_image" "ubuntu_mini" {
  architecture = "${var.architecture}"
  name = "${var.mini_image}"
}
//resource "scaleway_ip" "public_ip" {
//  count = 1
//}
resource "scaleway_server" "worker" {
  count = "${var.worker_instance_count}"
  name = "worker${count.index+1}"
  image = "${data.scaleway_image.ubuntu.id}"
  type = "${var.worker_instance_type}"
  state = "running"
  tags = ["k8s", "k8s_workers"]
  // volume {
  //   size_in_gb = 50
  //   type = "l_ssd"
  // }
}
resource "scaleway_server" "master" {
  count = "${var.master_instance_count}"
  name = "master${count.index+1}"
  image = "${data.scaleway_image.ubuntu.id}"
  type = "${var.master_instance_type}"
  state = "running"
  tags = ["k8s", "k8s_masters"]
}
resource "scaleway_server" "proxy1" {
  count = 1
  name = "proxy1"
  image = "${data.scaleway_image.ubuntu.id}"
  type = "${var.proxy_instance_type}"
  public_ip = "51.158.77.6"
  state = "running"
  tags = ["k8s", "k8s_proxy", "primary"]
}
resource "scaleway_server" "proxy2" {
  count = 1
  name = "proxy2"
  image = "${data.scaleway_image.ubuntu.id}"
  type = "${var.proxy_instance_type}"
  state = "running"
  tags = ["k8s", "k8s_proxy", "secondary"]
}
output "worker_private_ips" {
  value = ["${scaleway_server.worker.*.private_ip}"]
}
output "master_private_ips" {
  value = ["${scaleway_server.master.*.private_ip}"]
}
output "proxy0_private_ips" {
  value = ["${scaleway_server.proxy1.*.private_ip}"]
}
output "proxy1_private_ips" {
  value = ["${scaleway_server.proxy2.*.private_ip}"]
}
output "public_ip" {
  value = ["${scaleway_server.proxy1.*.public_ip}"]
}


@@ -7,33 +7,21 @@ variable "architecture" {
}
variable "image" {
  default = "ubuntu-bionic-k8s"
}
variable "mini_image" {
  default = "Ubuntu Mini Xenial 25G"
  default = "Ubuntu Bionic"
}
variable "master_instance_type" {
  default = "START1-S"
  default = "DEV1-S"
}
variable "master_instance_count" {
  default = 3
  default = 1
}
variable "proxy_instance_type" {
  default = "START1-S"
variable "node_instance_type" {
  default = "DEV1-S"
}
variable "worker_instance_type" {
  default = "START1-S"
}
variable "worker_volume_size" {
  default = 100
}
variable "worker_instance_count" {
  default = 3
variable "node_instance_count" {
  default = 2
}
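The new defaults give one DEV1-S master and two DEV1-S nodes; any of them can be overridden per run rather than edited in place. A sketch with hypothetical values:

# grow the node pool and use a larger master type for a single apply
terraform apply -var node_instance_count=3 -var master_instance_type=DEV1-M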