Deploy ingress, lego and dashboard

Paul-Henri Froidmont 2018-09-26 04:40:24 +02:00
parent bf83e675f2
commit f468fd3e34
43 changed files with 1321 additions and 142 deletions

@@ -0,0 +1,10 @@
replicas_default_backend: 1
image_default_backend: gcr.io/google_containers/defaultbackend
version_default_backend: 1.4
nginx_ingress_controller_image: gcr.io/google_containers/nginx-ingress-controller
nginx_ingress_controller_version: 0.9.0-beta.15
scaleway_servername1: proxy1
scaleway_servername2: proxy2
scaleway_ipaddr: "" # set this in the inventory file
scaleway_reverse_ipaddr: "" # set this in the inventory file
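A minimal sketch of the matching inventory entries (hedged: the hostnames follow the scaleway_servername defaults above, the addresses are placeholders):

[k8s_proxy]
proxy1
proxy2

[k8s_proxy:vars]
scaleway_ipaddr=51.15.x.x               # the movable Scaleway public IP
scaleway_reverse_ipaddr=yourdomain.tld  # reverse DNS of that IP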

@@ -0,0 +1,97 @@
---
- name: nginx_ingress_controller | Getting nodes labelled role=ingress-controller
command: "kubectl get nodes -l role=ingress-controller"
register: nodes
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Printing nodes
debug: var=nodes
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Labelling proxy nodes with role=ingress-controller
command: "kubectl label node {{ hostvars[item].ansible_hostname }} role=ingress-controller"
with_items:
- "{{ groups['k8s_proxy'] }}"
when:
- inventory_hostname == initial_master
- hostvars[item].ansible_hostname not in nodes.stdout
- name: nginx_ingress_controller | Templating manifests
template:
src: "{{ item }}"
dest: "/tmp/{{ item | regex_replace('.j2', '') }}"
with_items:
- default-backend-controller.yml.j2
- default-backend-service.yml.j2
- nginx-ingress-clusterolebinding.yml.j2
- nginx-ingress-configmap.yml.j2
- nginx-ingress-sa.yml.j2
- nginx-ingress-clusterole.yml.j2
- nginx-ingress-controller.yml.j2
- nginx-ingress-service.yml.j2
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Deploy the nginx_ingress_controller
kube:
name: "{{ item.name }}"
resource: "{{ item.type }}"
filename: "{{ item.file }}"
state: latest
with_items:
- { 'name': 'default-http-backend', 'type': 'deploy', 'file': '/tmp/default-backend-controller.yml' }
- { 'name': 'default-http-backend', 'type': 'svc', 'file': '/tmp/default-backend-service.yml' }
- { 'name': 'ingress', 'type': 'clusterrolebinding', 'file': '/tmp/nginx-ingress-clusterolebinding.yml' }
- { 'name': 'system:ingress', 'type': 'clusterrole', 'file': '/tmp/nginx-ingress-clusterole.yml' }
- { 'name': 'ingress', 'type': 'sa', 'file': '/tmp/nginx-ingress-sa.yml' }
- { 'name': 'nginx-ingress-cfg', 'type': 'configmap', 'file': '/tmp/nginx-ingress-configmap.yml' }
- { 'name': 'nginx-ingress-controller', 'type': 'deploy', 'file': '/tmp/nginx-ingress-controller.yml' }
- { 'name': 'nginx-ingress', 'type': 'svc', 'file': '/tmp/nginx-ingress-service.yml' }
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Removing manifests
file:
path: "/tmp/{{ item }}"
state: absent
with_items:
- default-backend-controller.yml
- default-backend-service.yml
- nginx-ingress-clusterolebinding.yml
- nginx-ingress-configmap.yml
- nginx-ingress-sa.yml
- nginx-ingress-clusterole.yml
- nginx-ingress-controller.yml
- nginx-ingress-service.yml
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Creating directory for scaleway-ipmove
file:
path: /usr/local/bin/scaleway-ipmove
state: directory
when: "'k8s_proxy' in group_names"
- name: nginx_ingress_controller | Getting scaleway-ipmove.py
git:
repo: https://github.com/chmod666org/scaleway-ipmove
dest: /usr/local/bin/scaleway-ipmove
force: yes
when: "'k8s_proxy' in group_names"
- name: nginx_ingress_controller | Templating notify.sh
template:
src: notify.sh.j2
dest: /usr/local/bin/scaleway-ipmove/notify.sh
mode: 0500
owner: root
group: root
when: "'k8s_proxy' in group_names"
# this deploys keepalived as a static pod on the proxy nodes
- name: nginx_ingress_controller | Templating keepalived on proxy node
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
with_items:
- { 'src': 'keepalived.yml.j2', 'dest': '/etc/kubernetes/manifests/keepalived.yml' }
when:
- "'k8s_proxy' in group_names"
- groups.k8s_proxy|length > 1
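A hedged follow-up check, not part of this commit, that would confirm the controller pods come up, using the k8s-app label set in the deployment manifest below:

- name: nginx_ingress_controller | Waiting for controller pods to be Running
  command: kubectl get pods -n kube-system -l k8s-app=nginx-ingress-controller -o jsonpath='{.items[*].status.phase}'
  register: ingress_pods
  until: "'Running' in ingress_pods.stdout"
  retries: 10
  delay: 15
  when: inventory_hostname == initial_master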

@@ -0,0 +1,39 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
labels:
k8s-app: default-http-backend
namespace: kube-system
spec:
replicas: {{ replicas_default_backend }}
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: {{ image_default_backend }}:{{ version_default_backend }}
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
nodeSelector:
role: ingress-controller

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend

@@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
name: keepalived
namespace: kube-system
spec:
hostNetwork: true
volumes:
- hostPath:
path: /usr/local/bin/scaleway-ipmove/
name: scaleway-moveip
containers:
- name: keepalived
image: chmod666/keepalived:latest
# if the tag is latest, imagePullPolicy defaults to Always,
# but when keepalived is in the backup state a proxy may have no internet connection.
# To avoid keepalived failing to start in that case we set imagePullPolicy: IfNotPresent,
# assuming the image was already pulled at cluster creation. Neat.
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: "/mnt"
name: scaleway-moveip
securityContext:
capabilities:
add:
- NET_ADMIN
env:
- name: KEEPALIVED_INTERFACE
value: tun0
- name: KEEPALIVED_UNICAST_PEERS
value: "#PYTHON2BASH:['{{ groups['k8s_proxy'] | map('extract', hostvars, ['vpn_ip']) | join("', '") }}']"
- name: KEEPALIVED_VIRTUAL_IPS
value: "#PYTHON2BASH:['{{ keepalived_ip }}']"
- name: KEEPALIVED_PRIORITY
value: "{{ groups['k8s_proxy'].index(inventory_hostname) + 1 }}"
- name: KEEPALIVED_NOTIFY
value: "/mnt/notify.sh"

@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: system:ingress
rules:
- apiGroups:
- ""
resources: ["configmaps","secrets","endpoints","events","services"]
verbs: ["list","watch","create","update","delete","get"]
- apiGroups:
- ""
- "extensions"
resources: ["services","nodes","ingresses","pods","ingresses/status"]
verbs: ["list","watch","create","update","delete","get"]

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:ingress
subjects:
- kind: ServiceAccount
name: ingress
namespace: kube-system

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nginx-ingress-cfg
namespace: kube-system
labels:
app: nginx-ingress-cfg
data:
enable-sticky-sessions: 'true' ## use ROUTE cookie to provide session affinity
enable-vts-status: 'true' ## enable the nginx-module-vts status page in place of the default status page
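The VTS page is served on the controller's management port (18080, exposed as http-mgmt by the nginx-ingress service below). A hypothetical way to eyeball it, assuming a kubectl recent enough to port-forward a Service and the default status path of this controller version:

# kubectl -n kube-system port-forward svc/nginx-ingress 18080:18080
# then browse http://localhost:18080/nginx_status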

@@ -0,0 +1,66 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
spec:
# one replica per proxy
replicas: {{ groups['k8s_proxy'] | length }}
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
# hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
# however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 is already taken on the host
# that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
# like with kubeadm
# hostNetwork: true
serviceAccountName: ingress
terminationGracePeriodSeconds: 60
#https://github.com/kubernetes/contrib/issues/2135
# hostPort does not work with CNI, so hostNetwork is used instead
hostNetwork: true
containers:
- image: {{ nginx_ingress_controller_image }}:{{ nginx_ingress_controller_version }}
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
#hostPort: 80
- containerPort: 443
#hostPort: 443
- containerPort: 18080
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-ingress-cfg
nodeSelector:
# node must be labelled with role=ingress-controller
role: ingress-controller
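Since the controller runs with hostNetwork on the proxy nodes, ports 80/443 are bound directly on whichever proxy currently holds the keepalived VIP. A hypothetical smoke test once the Scaleway IP is attached:

# curl -H 'Host: no-such-vhost.example' http://<scaleway_ipaddr>/
# expected reply from the default backend: "default backend - 404"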

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress
namespace: kube-system

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress
namespace: kube-system
spec:
ports:
- port: 80
name: http
- port: 443
name: https
- port: 18080
name: http-mgmt
selector:
k8s-app: nginx-ingress-controller

@@ -0,0 +1,35 @@
#!/bin/bash
# for ANY state transition.
# "notify" script is called AFTER the
# notify_* script(s) and is executed
# with 3 arguments provided by keepalived
# (ie don't include parameters in the notify line).
# arguments
# $1 = "GROUP"|"INSTANCE"
# $2 = name of group or instance
# $3 = target state of transition
# ("MASTER"|"BACKUP"|"FAULT")
TYPE=$1
NAME=$2
STATE=$3
case $STATE in
"MASTER") echo "I'm the MASTER! Whup whup." > /proc/1/fd/1
echo "Here is the master"
# this moves the public IP to the new master using the Scaleway API
/mnt/scaleway-ipmove.py {{ scaleway_token }} {{ scaleway_servername1 }} {{ scaleway_servername2 }} {{ scaleway_ipaddr }} {{ scaleway_reverse_ipaddr }} {{ scaleway_orga }}
exit 0
;;
"BACKUP") echo "Ok, i'm just a backup, great." > /proc/1/fd/1
echo "Here is the backup"
exit 0
;;
"FAULT") echo "Fault, what ?" > /proc/1/fd/1
exit 0
;;
*) echo "Unknown state" > /proc/1/fd/1
exit 1
;;
esac
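Keepalived calls the script with the three positional arguments described in the header comment. A hypothetical manual smoke test of the BACKUP branch (the instance name VI_1 is made up; testing MASTER would actually move the Scaleway IP):

/usr/local/bin/scaleway-ipmove/notify.sh INSTANCE VI_1 BACKUP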

@@ -0,0 +1,15 @@
---
# set basic_auth_user to a non-empty value to enforce basic auth
basic_auth_user: ""
basic_auth_password: ""
# e.g. the fqdn would be k8s.yourdomain.tld if
# dashboard_subdomain=k8s
# scaleway_reverse_ipaddr=yourdomain.tld
dashboard_subdomain: k8s
dashboard_image: gcr.io/google_containers/kubernetes-dashboard-amd64
dashboard_version: v1.10.0
init_dashboard_image: gcr.io/google_containers/kubernetes-dashboard-init-amd64
init_dashboard_version: v1.0.1
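A hypothetical group_vars override enabling basic auth; the keys are this role's variables, the values are placeholders:

basic_auth_user: admin
basic_auth_password: "{{ vault_dashboard_password }}"  # e.g. an ansible-vault secret
dashboard_subdomain: dash  # dashboard served at dash.<scaleway_reverse_ipaddr>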

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard-crb
roleRef:
apiGroup: ""
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

@@ -0,0 +1,20 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create and watch for changes of 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "watch"]
- apiGroups: [""]
resources: ["secrets"]
# Allow Dashboard to get, update and delete 'kubernetes-dashboard-key-holder' secret.
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque

@@ -0,0 +1,13 @@
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 9090
selector:
k8s-app: kubernetes-dashboard

@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: heapster
labels:
k8s-addon: monitoring-standalone.addons.k8s.io
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:heapster
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system

@@ -0,0 +1,77 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster
namespace: kube-system
labels:
k8s-addon: monitoring-standalone.addons.k8s.io
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.7.0
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.7.0
template:
metadata:
labels:
k8s-app: heapster
version: v1.7.0
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: heapster
containers:
- image: gcr.io/google_containers/heapster:v1.4.0
name: heapster
livenessProbe:
httpGet:
path: /healthz
port: 8082
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:2.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=80m
- --extra-cpu=0.5m
- --memory=140Mi
- --extra-memory=4Mi
- --deployment=heapster
- --container=heapster
- --poll-period=300000
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
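For reference, addon-resizer sizes the watched container as base + extra × node count, so with the flags above the arithmetic works out as follows (a sketch, not measured output):

# 10-node cluster: cpu    = 80m   + 0.5m × 10 = 85m
#                  memory = 140Mi + 4Mi  × 10 = 180Mi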

@@ -0,0 +1,24 @@
# Heapster's pod_nanny monitors the heapster deployment & its pod(s), and scales
# the resources of the deployment if necessary.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: system:pod-nanny
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- "extensions"
resources:
- deployments
verbs:
- get
- update

@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: heapster-binding
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system:pod-nanny
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: heapster
namespace: kube-system
labels:
k8s-addon: monitoring-standalone.addons.k8s.io

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: heapster
namespace: kube-system
labels:
k8s-addon: monitoring-standalone.addons.k8s.io
kubernetes.io/name: "Heapster"
kubernetes.io/cluster-service: "true"
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster

@@ -0,0 +1,105 @@
---
- block:
- name: Installing python-passlib
apt:
name: python-passlib
state: latest
register: result
retries: 3
until: result is success
- name: Creating htpasswd file if k8s has basic auth
htpasswd:
path: /tmp/auth
name: "{{ basic_auth_user }}"
password: "{{ basic_auth_password }}"
when: inventory_hostname == initial_master
- name: Getting secrets
command: kubectl get secrets --namespace=kube-system
register: secrets
when: inventory_hostname == initial_master
- name: Creating secret
command: kubectl create secret generic dashboard-basic-auth --namespace=kube-system --from-file=/tmp/auth
when:
- inventory_hostname == initial_master
- '"dashboard-basic-auth" not in secrets.stdout'
- name: Deleting basic_auth file
file:
path: /tmp/auth
state: absent
when: inventory_hostname == initial_master
when: basic_auth_user | length > 0
- name: Templating manifests
template:
src: "{{ item }}"
dest: "/tmp/{{ item | regex_replace('.j2', '') }}"
with_items:
- dashboard-ingress.yml.j2
- dashboard-deployment.yml.j2
when: inventory_hostname == initial_master
- name: Copying manifest files
copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
with_items:
- dashboard-rolebinding.yml
- dashboard-role.yml
- dashboard-sa.yml
- dashboard-clusterrolebinding.yml
- dashboard-secret.yml
- dashboard-service.yml
- heapster-rolebinding.yml
- heapster-clusterrolebinding.yml
- heapster-role.yml
- heapster-sa.yml
- heapster-service.yml
- heapster-deployment.yml
when: inventory_hostname == initial_master
- name: Deploying kubernetes-dashboard
kube:
name: "{{ item.name }}"
resource: "{{ item.type }}"
filename: "{{ item.file }}"
state: latest
with_items:
- { 'name': 'kubernetes-dashboard', 'type': 'sa', 'file': '/tmp/dashboard-sa.yml' }
- { 'name': 'kubernetes-dashboard', 'type': 'clusterrolebinding', 'file': '/tmp/dashboard-clusterrolebinding.yml' }
- { 'name': 'kubernetes-dashboard', 'type': 'secret', 'file': '/tmp/dashboard-secret.yml' }
- { 'name': 'kubernetes-dashboard', 'type': 'service', 'file': '/tmp/dashboard-service.yml' }
- { 'name': 'kubernetes-dashboard', 'type': 'deployment', 'file': '/tmp/dashboard-deployment.yml' }
- { 'name': 'kubernetes-dashboard', 'type': 'ingress', 'file': '/tmp/dashboard-ingress.yml' }
- { 'name': 'heapster', 'type': 'sa', 'file': '/tmp/heapster-sa.yml' }
- { 'name': 'heapster', 'type': 'clusterrolebinding', 'file': '/tmp/heapster-clusterrolebinding.yml' }
- { 'name': 'heapster', 'type': 'rolebinding', 'file': '/tmp/heapster-rolebinding.yml' }
- { 'name': 'heapster', 'type': 'role', 'file': '/tmp/heapster-role.yml' }
- { 'name': 'heapster', 'type': 'service', 'file': '/tmp/heapster-service.yml' }
- { 'name': 'heapster', 'type': 'deployment', 'file': '/tmp/heapster-deployment.yml' }
when: inventory_hostname == initial_master
- name: Removing manifests
file:
path: "/tmp/{{ item }}"
state: absent
with_items:
- dashboard-ingress.yml
- dashboard-deployment.yml
- dashboard-clusterrolebinding.yml
- dashboard-rolebinding.yml
- dashboard-role.yml
- dashboard-sa.yml
- dashboard-secret.yml
- dashboard-service.yml
- heapster-rolebinding.yml
- heapster-clusterrolebinding.yml
- heapster-role.yml
- heapster-sa.yml
- heapster-service.yml
- heapster-deployment.yml
when: inventory_hostname == initial_master
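A hedged post-deploy check, not in this commit, mirroring the ingress-controller pattern:

- name: Waiting for the kubernetes-dashboard rollout to finish
  command: kubectl rollout status deployment/kubernetes-dashboard -n kube-system
  changed_when: no
  when: inventory_hostname == initial_master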

@@ -0,0 +1,47 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: {{ dashboard_image }}:{{ dashboard_version }}
ports:
- containerPort: 9090
protocol: TCP
args:
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule

@@ -0,0 +1,30 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
namespace: kube-system
name: kubernetes-dashboard
annotations:
# enable kube-lego for this ingress
kubernetes.io/tls-acme: "true"
{% if basic_auth_user | length > 0 %}
ingress.kubernetes.io/auth-type: basic
# name of the secret that contains the user/password definitions
ingress.kubernetes.io/auth-secret: dashboard-basic-auth
# message to display with an appropiate context why the authentication is required
ingress.kubernetes.io/auth-realm: "Authentication is required to access the k8s dashboard "
{% endif %}
spec:
# this enables tls for the specified domain names
tls:
- hosts:
- {{ dashboard_subdomain }}.{{ scaleway_reverse_ipaddr }}
secretName: dashboard-tls
rules:
- host: {{ dashboard_subdomain }}.{{ scaleway_reverse_ipaddr }}
http:
paths:
- path: /
backend:
serviceName: kubernetes-dashboard
servicePort: 80

@@ -24,7 +24,7 @@
shell: "ping {{ api_floating_ip }} -c 1"
register: result
changed_when: no
-failed_when: ('100.0% packet loss' in result.stdout)
+failed_when: ('100% packet loss' in result.stdout)
- include: packages.yml

@@ -0,0 +1,4 @@
---
lego_email: deckard@chmod666.org
lego_image: jetstack/kube-lego
lego_version: 0.1.5

roles/lego/tasks/main.yml (new file)

@@ -0,0 +1,40 @@
---
- name: kube_lego | Templating manifests
template:
src: "{{ item }}"
dest: "/tmp/{{ item | regex_replace('.j2', '') }}"
with_items:
- lego-sa.yml.j2
- lego-clusterolebinding.yml.j2
- lego-clusterole.yml.j2
- lego-configmap.yml.j2
- lego-controller.yml.j2
when: inventory_hostname == initial_master
- name: kube_lego | Deploying kube-lego
kube:
name: "{{ item.name }}"
resource: "{{ item.type }}"
filename: "{{ item.file }}"
state: latest
with_items:
- { 'name': 'kube-lego', 'type': 'sa', 'file': '/tmp/lego-sa.yml' }
- { 'name': 'kube-lego', 'type': 'clusterrolebinding', 'file': '/tmp/lego-clusterolebinding.yml' }
- { 'name': 'kube-lego', 'type': 'clusterrole', 'file': '/tmp/lego-clusterole.yml' }
- { 'name': 'kube-lego', 'type': 'configmap', 'file': '/tmp/lego-configmap.yml' }
- { 'name': 'kube-lego', 'type': 'deploy', 'file': '/tmp/lego-controller.yml' }
when: inventory_hostname == initial_master
- name: kube_lego | Removing manifests
file:
path: "/tmp/{{ item }}"
state: absent
with_items:
- lego-sa.yml
- lego-clusterolebinding.yml
- lego-clusterole.yml
- lego-configmap.yml
- lego-controller.yml
when: inventory_hostname == initial_master
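Once kube-lego runs, any Ingress opts into Let's Encrypt the same way the dashboard ingress above does, via the tls-acme annotation. A hypothetical application ingress (all myapp names are placeholders):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp
  annotations:
    # tells kube-lego to request and renew a certificate for this host
    kubernetes.io/tls-acme: "true"
spec:
  tls:
  - hosts:
    - myapp.yourdomain.tld
    secretName: myapp-tls  # kube-lego stores the issued certificate here
  rules:
  - host: myapp.yourdomain.tld
    http:
      paths:
      - path: /
        backend:
          serviceName: myapp
          servicePort: 80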

@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: system:kube-lego
rules:
- apiGroups:
- ""
resources: ["configmaps","secrets","endpoints","events","services"]
verbs: ["list","watch","create","update","delete","get"]
- apiGroups:
- ""
- "extensions"
resources: ["services","nodes","ingresses","pods","ingresses/status"]
verbs: ["list","watch","create","update","delete","get"]

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kube-lego
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-lego
subjects:
- kind: ServiceAccount
name: kube-lego
namespace: kube-system

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-lego
namespace: kube-system
data:
# modify this to specify your email address
lego.email: "{{ lego_email }}"
# configure letsencrypt's production api
lego.url: "https://acme-v01.api.letsencrypt.org/directory"
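While testing, pointing lego.url at Let's Encrypt's staging endpoint avoids the production rate limits. A hypothetical override (staging certificates are not browser-trusted):

# lego.url: "https://acme-staging.api.letsencrypt.org/directory"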

@@ -0,0 +1,48 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-lego
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
app: kube-lego
spec:
serviceAccountName: kube-lego
containers:
- name: kube-lego
image: "{{ lego_image }}:{{ lego_version }}"
imagePullPolicy: Always
ports:
- containerPort: 8080
env:
- name: LEGO_EMAIL
valueFrom:
configMapKeyRef:
name: kube-lego
key: lego.email
- name: LEGO_URL
valueFrom:
configMapKeyRef:
name: kube-lego
key: lego.url
- name: LEGO_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LEGO_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
timeoutSeconds: 1
nodeSelector:
# node must be labelled with role=ingress-controller
role: ingress-controller

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-lego

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-lego
namespace: kube-system