Deploy ingress, lego and dashboard

Paul-Henri Froidmont 2018-09-26 04:40:24 +02:00
parent bf83e675f2
commit f468fd3e34
43 changed files with 1321 additions and 142 deletions

@@ -0,0 +1,10 @@
replicas_default_backend: 1
image_default_backend: gcr.io/google_containers/defaultbackend
version_default_backend: 1.4
nginx_ingress_controller_image: gcr.io/google_containers/nginx-ingress-controller
nginx_ingress_controller_version: 0.9.0-beta.15
scaleway_servername1: proxy1
scaleway_servername2: proxy2
scaleway_ipaddr: "" # set this in the inventory file
scaleway_reverse_ipaddr: "" # set this in the inventory file
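
The two empty values are meant to be overridden per cluster. A hedged sketch of the matching inventory entries (all values hypothetical); scaleway_token and scaleway_orga, consumed by the notify.sh template further down, would live in the same place, ideally in a vault:

scaleway_ipaddr: "51.15.42.42"                # public IP floated between proxies (hypothetical)
scaleway_reverse_ipaddr: "proxy.example.org"  # reverse DNS for that IP (hypothetical)
scaleway_token: "00000000-0000-0000-0000-000000000000"  # Scaleway API token (placeholder)
scaleway_orga: "00000000-0000-0000-0000-000000000000"   # Scaleway organisation ID (placeholder)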

@@ -0,0 +1,97 @@
---
- name: nginx_ingress_controller | Getting nodes already labelled role=ingress-controller
command: "kubectl get nodes -l role=ingress-controller"
register: nodes
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Printing nodes
debug: var=nodes
when: inventory_hostname == initial_master
# only label nodes that do not already carry the label (keeps this task idempotent)
- name: nginx_ingress_controller | Labelling proxy nodes with role=ingress-controller
command: "kubectl label node {{ hostvars[item].ansible_hostname }} role=ingress-controller"
with_items:
- "{{ groups['k8s_proxy'] }}"
when:
- inventory_hostname == initial_master
- hostvars[item].ansible_hostname not in nodes.stdout
- name: nginx_ingress_controller | Templating manifests
template:
src: "{{ item }}"
    # strip only the trailing .j2 (an unescaped dot would match any character)
    dest: "/tmp/{{ item | regex_replace('\\.j2$', '') }}"
with_items:
- default-backend-controller.yml.j2
- default-backend-service.yml.j2
- nginx-ingress-clusterolebinding.yml.j2
- nginx-ingress-configmap.yml.j2
- nginx-ingress-sa.yml.j2
- nginx-ingress-clusterole.yml.j2
- nginx-ingress-controller.yml.j2
- nginx-ingress-service.yml.j2
when: inventory_hostname == initial_master
# the kube module's state "latest" re-applies a resource even when it already exists
- name: nginx_ingress_controller | Deploy the nginx_ingress_controller
kube:
name: "{{ item.name }}"
resource: "{{ item.type }}"
filename: "{{ item.file }}"
state: latest
with_items:
- { 'name': 'default-http-backend', 'type': 'deploy', 'file': '/tmp/default-backend-controller.yml' }
- { 'name': 'default-http-backend', 'type': 'svc', 'file': '/tmp/default-backend-service.yml' }
- { 'name': 'ingress', 'type': 'clusterrolebinding', 'file': '/tmp/nginx-ingress-clusterolebinding.yml' }
- { 'name': 'system:ingress', 'type': 'clusterrole', 'file': '/tmp/nginx-ingress-clusterole.yml' }
- { 'name': 'ingress', 'type': 'sa', 'file': '/tmp/nginx-ingress-sa.yml' }
- { 'name': 'nginx-ingress-cfg', 'type': 'configmap', 'file': '/tmp/nginx-ingress-configmap.yml' }
- { 'name': 'nginx-ingress-controller', 'type': 'deploy', 'file': '/tmp/nginx-ingress-controller.yml' }
- { 'name': 'nginx-ingress', 'type': 'svc', 'file': '/tmp/nginx-ingress-service.yml' }
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Removing manifest
file:
path: "/tmp/{{ item }}"
state: absent
with_items:
- default-backend-controller.yml
- default-backend-service.yml
- nginx-ingress-clusterolebinding.yml
- nginx-ingress-configmap.yml
- nginx-ingress-sa.yml
- nginx-ingress-clusterole.yml
- nginx-ingress-controller.yml
- nginx-ingress-service.yml
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Creating directory for scaleway-ipmove
file:
path: /usr/local/bin/scaleway-ipmove
state: directory
when: "'k8s_proxy' in group_names"
- name: nginx_ingress_controller | Getting scaleway-ipmove.py
git:
repo: https://github.com/chmod666org/scaleway-ipmove
dest: /usr/local/bin/scaleway-ipmove
force: yes
when: "'k8s_proxy' in group_names"
- name: nginx_ingress_controller | Templating notify.sh
template:
src: notify.sh.j2
dest: /usr/local/bin/scaleway-ipmove/notify.sh
mode: 0500
owner: root
group: root
when: "'k8s_proxy' in group_names"
# kubelet picks up this static pod manifest and runs keepalived on each proxy node
- name: nginx_ingress_controller | Templating keepalived static pod on proxy nodes
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
with_items:
- { 'src': 'keepalived.yml.j2', 'dest': '/etc/kubernetes/manifests/keepalived.yml' }
when:
- "'k8s_proxy' in group_names"
- groups.k8s_proxy|length > 1
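
The keepalived pod is only templated when the k8s_proxy group holds more than one host, and the manifest below expects each proxy to expose a vpn_ip variable. A minimal sketch of such an inventory in YAML form (hostnames and addresses hypothetical):

k8s_proxy:
  hosts:
    proxy1:
      vpn_ip: 10.0.0.1
    proxy2:
      vpn_ip: 10.0.0.2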

@@ -0,0 +1,39 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
labels:
k8s-app: default-http-backend
namespace: kube-system
spec:
replicas: {{ replicas_default_backend }}
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
        # Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: {{ image_default_backend }}:{{ version_default_backend }}
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
nodeSelector:
role: ingress-controller

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend

@@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
name: keepalived
namespace: kube-system
spec:
hostNetwork: true
volumes:
- hostPath:
path: /usr/local/bin/scaleway-ipmove/
name: scaleway-moveip
containers:
- name: keepalived
image: chmod666/keepalived:latest
    # with the "latest" tag the default imagePullPolicy would be "Always",
    # but a proxy holding the keepalived BACKUP role may have no internet
    # connection; imagePullPolicy: IfNotPresent lets keepalived start anyway,
    # assuming the image was already pulled at cluster creation. Neat.
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: "/mnt"
name: scaleway-moveip
securityContext:
capabilities:
add:
- NET_ADMIN
env:
- name: KEEPALIVED_INTERFACE
value: tun0
    - name: KEEPALIVED_UNICAST_PEERS
      # the "list" filter renders as ['ip1', 'ip2'], the Python-literal format
      # the #PYTHON2BASH prefix expects, and keeps the YAML quoting valid
      value: "#PYTHON2BASH:{{ groups['k8s_proxy'] | map('extract', hostvars, ['vpn_ip']) | list }}"
- name: KEEPALIVED_VIRTUAL_IPS
value: "#PYTHON2BASH:['{{ keepalived_ip }}']"
    # priority is the host's 1-based position in the k8s_proxy group;
    # the highest priority wins the MASTER election
    - name: KEEPALIVED_PRIORITY
      value: "{{ groups['k8s_proxy'].index(inventory_hostname) + 1 }}"
- name: KEEPALIVED_NOTIFY
value: "/mnt/notify.sh"

@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: system:ingress
rules:
- apiGroups:
- ""
resources: ["configmaps","secrets","endpoints","events","services"]
verbs: ["list","watch","create","update","delete","get"]
- apiGroups:
- ""
- "extensions"
resources: ["services","nodes","ingresses","pods","ingresses/status"]
verbs: ["list","watch","create","update","delete","get"]

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:ingress
subjects:
- kind: ServiceAccount
name: ingress
namespace: kube-system

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nginx-ingress-cfg
namespace: kube-system
labels:
app: nginx-ingress-cfg
data:
  enable-sticky-sessions: 'true' ## use a ROUTE cookie to provide session affinity
  enable-vts-status: 'true' ## replace the default status page with the nginx-module-vts one

@@ -0,0 +1,66 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
spec:
  # one replica per proxy node
replicas: {{ groups['k8s_proxy'] | length }}
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
      # hostNetwork makes it possible to use IPv6 and to preserve the source IP
      # correctly regardless of the Docker configuration. It is not a hard
      # dependency of the nginx-ingress-controller itself, and it may cause
      # issues if port 10254 is already taken on the host. hostPort would be
      # the alternative, but it is broken with CNI
      # (https://github.com/kubernetes/kubernetes/issues/31307,
      # https://github.com/kubernetes/contrib/issues/2135), so hostNetwork is
      # required on clusters that use CNI, like those deployed with kubeadm.
      serviceAccountName: ingress
      terminationGracePeriodSeconds: 60
      hostNetwork: true
containers:
- image: {{ nginx_ingress_controller_image }}:{{ nginx_ingress_controller_version }}
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
#hostPort: 80
- containerPort: 443
#hostPort: 443
- containerPort: 18080
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-ingress-cfg
nodeSelector:
        # nodes must be labelled with role=ingress-controller
role: ingress-controller
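
Once the controller is running it serves Ingress resources cluster-wide; a minimal sketch of one, using the extensions/v1beta1 API current for this controller version (host and service names hypothetical):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example
  namespace: default
spec:
  rules:
  - host: app.example.org
    http:
      paths:
      - path: /
        backend:
          serviceName: example-svc
          servicePort: 80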

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress
namespace: kube-system

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress
namespace: kube-system
spec:
ports:
- port: 80
name: http
- port: 443
name: https
- port: 18080
name: http-mgmt
selector:
k8s-app: nginx-ingress-controller

@@ -0,0 +1,35 @@
#!/bin/bash
# Called for ANY state transition. The "notify" script is called AFTER the
# notify_* script(s) and is executed with 3 arguments provided by keepalived
# (i.e. don't include parameters in the notify line).
# Messages are redirected to /proc/1/fd/1 so they show up in the container logs.
# arguments
# $1 = "GROUP"|"INSTANCE"
# $2 = name of group or instance
# $3 = target state of transition
# ("MASTER"|"BACKUP"|"FAULT")
TYPE=$1
NAME=$2
STATE=$3
case $STATE in
"MASTER") echo "I'm the MASTER! Whup whup." > /proc/1/fd/1
echo "Here is the master"
# this moves the public IP to the new MASTER using the Scaleway API
/mnt/scaleway-ipmove.py {{ scaleway_token }} {{ scaleway_servername1 }} {{ scaleway_servername2 }} {{ scaleway_ipaddr }} {{ scaleway_reverse_ipaddr }} {{ scaleway_orga }}
exit 0
;;
"BACKUP") echo "Ok, i'm just a backup, great." > /proc/1/fd/1
echo "Here is the backup"
exit 0
;;
"FAULT") echo "Fault, what ?" > /proc/1/fd/1
exit 0
;;
*) echo "Unknown state" > /proc/1/fd/1
exit 1
;;
esac