Use traefik ingress controller instead of nginx

This commit is contained in:
Paul-Henri Froidmont 2018-10-02 00:22:19 +02:00
parent ed24aa4f8d
commit d8d9733ed9
25 changed files with 187 additions and 325 deletions

View file

@ -1,8 +1,6 @@
replicas_default_backend: 1
image_default_backend: gcr.io/google_containers/defaultbackend
version_default_backend: 1.4
nginx_ingress_controller_image: gcr.io/google_containers/nginx-ingress-controller
nginx_ingress_controller_version: 0.9.0-beta.15
scaleway_servername1: proxy1
scaleway_servername2: proxy2

View file

@ -1,14 +1,15 @@
---
- name: nginx_ingress_controller | Getting node labels
- name: traefik-ingress-controller | Getting node labels
command: "kubectl get nodes -l role=ingress-controller"
register: nodes
changed_when: no
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Printing nodes
- name: traefik-ingress-controller | Printing nodes
debug: var=nodes
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Labelling proxy nodes with role=ingress_controller
- name: traefik-ingress-controller | Labelling proxy nodes with role=ingress-controller
command: "kubectl label node {{ hostvars[item].ansible_hostname }} role=ingress-controller"
with_items:
- "{{ groups['k8s_proxy'] }}"
@ -16,67 +17,50 @@
- inventory_hostname == initial_master
- hostvars[item].ansible_hostname not in nodes.stdout
- name: nginx_ingress_controller | Templating manifests
- name: traefik-ingress-controller | Templating manifests
template:
src: "{{ item }}"
dest: "/tmp/{{ item | regex_replace('.j2', '') }}"
with_items:
- default-backend-controller.yml.j2
- default-backend-service.yml.j2
- nginx-ingress-clusterolebinding.yml.j2
- nginx-ingress-configmap.yml.j2
- nginx-ingress-sa.yml.j2
- nginx-ingress-clusterole.yml.j2
- nginx-ingress-controller.yml.j2
- nginx-ingress-service.yml.j2
- traefik-ingress-clusterolebinding.yml.j2
- traefik-ingress-configmap.yml.j2
- traefik-ingress-sa.yml.j2
- traefik-ingress-clusterole.yml.j2
- traefik-ingress-ds.yml.j2
- traefik-ingress-service.yml.j2
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Deploy the nginx_ingress_controller
- name: traefik-ingress-controller | Deploy the traefik-ingress-controller
kube:
name: "{{ item.name }}"
resource: "{{ item.type }}"
filename: "{{ item.file }}"
filename: "{{ item }}"
state: latest
with_items:
- { 'name': 'default-http-backend', 'type': 'deploy', 'file': '/tmp/default-backend-controller.yml' }
- { 'name': 'default-http-backend', 'type': 'svc', 'file': '/tmp/default-backend-service.yml' }
- { 'name': 'ingress', 'type': 'clusterrolebinding', 'file': '/tmp/nginx-ingress-clusterolebinding.yml' }
- { 'name': 'system:ingress', 'type': 'clusterrole', 'file': '/tmp/nginx-ingress-clusterole.yml' }
- { 'name': 'ingress', 'type': 'sa', 'file': '/tmp/nginx-ingress-sa.yml' }
- { 'name': 'nginx-ingress-cfg', 'type': 'configmap', 'file': '/tmp/nginx-ingress-configmap.yml' }
- { 'name': 'nginx-ingress-controller', 'type': 'deploy', 'file': '/tmp/nginx-ingress-controller.yml' }
- { 'name': 'nginx-ingress', 'type': 'svc', 'file': '/tmp/nginx-ingress-service.yml' }
- '/tmp/default-backend-controller.yml'
- '/tmp/default-backend-service.yml'
- '/tmp/traefik-ingress-clusterolebinding.yml'
- '/tmp/traefik-ingress-configmap.yml'
- '/tmp/traefik-ingress-clusterole.yml'
- '/tmp/traefik-ingress-sa.yml'
- '/tmp/traefik-ingress-ds.yml'
- '/tmp/traefik-ingress-service.yml'
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Removing manifest
file:
path: "/tmp/{{ item }}"
state: absent
with_items:
- default-backend-controller.yml
- default-backend-service.yml
- nginx-ingress-clusterolebinding.yml
- nginx-ingress-configmap.yml
- nginx-ingress-sa.yml
- nginx-ingress-clusterole.yml
- nginx-ingress-controller.yml
- nginx-ingress-service.yml
when: inventory_hostname == initial_master
- name: nginx_ingress_controller | Creating directory for scaleway-ipmove
- name: traefik-ingress-controller | Creating directory for scaleway-ipmove
file:
path: /usr/local/bin/scaleway-ipmove
state: directory
when: "'k8s_proxy' in group_names"
- name: nginx_ingress_controller | Getting scaleway-ipmove.py
- name: traefik-ingress-controller | Getting scaleway-ipmove.py
git:
repo: https://github.com/chmod666org/scaleway-ipmove
dest: /usr/local/bin/scaleway-ipmove
force: yes
when: "'k8s_proxy' in group_names"
- name: nginx_ingress_controller | notify.sh
- name: traefik-ingress-controller | notify.sh
template:
src: notify.sh.j2
dest: /usr/local/bin/scaleway-ipmove/notify.sh
@ -86,7 +70,7 @@
when: "'k8s_proxy' in group_names"
# this runs keepalived on proxy nodes
- name: nginx_ingress_controller | Templating keepalived on proxy node
- name: traefik-ingress-controller | Templating keepalived on proxy node
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"

View file

@ -15,7 +15,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissable as long as:
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: {{ image_default_backend }}:{{ version_default_backend }}

View file

@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: system:ingress
rules:
- apiGroups:
- ""
resources: ["configmaps","secrets","endpoints","events","services"]
verbs: ["list","watch","create","update","delete","get"]
- apiGroups:
- ""
- "extensions"
resources: ["services","nodes","ingresses","pods","ingresses/status"]
verbs: ["list","watch","create","update","delete","get"]

View file

@ -1,10 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nginx-ingress-cfg
namespace: kube-system
labels:
app: nginx-ingress-cfg
data:
enable-sticky-sessions: 'true' ## use ROUTE cookie to provide session affinity
enable-vts-status: 'true' ## Allows the replacement of the default status page nginx-module-vts

View file

@ -1,66 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
namespace: kube-system
spec:
# on replica per proxy
replicas: {{ groups['k8s_proxy'] | length }}
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
# hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
# however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
# that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
# like with kubeadm
# hostNetwork: true
serviceAccountName: ingress
terminationGracePeriodSeconds: 60
#https://github.com/kubernetes/contrib/issues/2135
# CNI and hostPort does not work using hostNetwork
hostNetwork: true
containers:
- image: {{ nginx_ingress_controller_image }}:{{ nginx_ingress_controller_version }}
name: nginx-ingress-controller
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
#hostPort: 80
- containerPort: 443
#hostPort: 443
- containerPort: 18080
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-ingress-cfg
nodeSelector:
# node must be labelled with roles=ingress-controller
role: ingress-controller

View file

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress
namespace: kube-system

View file

@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress
namespace: kube-system
spec:
ports:
- port: 80
name: http
- port: 443
name: https
- port: 18080
name: http-mgmt
selector:
k8s-app: nginx-ingress-controller

View file

@ -0,0 +1,23 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch

View file

@ -1,12 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: ingress
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:ingress
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: ingress
namespace: kube-system
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View file

@ -0,0 +1,45 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: traefik-config
namespace: kube-system
labels:
app: traefik-ingress-controller
data:
traefik.toml: |-
checkNewVersion = false
IdleTimeout = "180s"
MaxIdleConnsPerHost = 500
logLevel = "INFO"
defaultEntryPoints = ["http", "https"]
[retry]
attempts = 3
[kubernetes]
[web]
address = ":8081"
[acme]
email = "letsencrypt.account@banditlair.com"
storage = "traefik/acme/account"
entryPoint = "https"
OnHostRule = true
onDemand = true
acmeLogging = true
# TODO Remove this line when going to prod
caServer = "https://acme-staging-v02.api.letsencrypt.org/directory"
[[acme.domains]]
main = "{{ scaleway_reverse_ipaddr }}"
[entryPoints]
[entryPoints.http]
address = ":80"
compress = true
[entryPoints.http.redirect]
entryPoint = "https"
[entryPoints.https]
address = ":443"
[entryPoints.https.tls]

View file

@ -0,0 +1,59 @@
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik
name: traefik-ingress-lb
resources:
limits:
cpu: 200m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
hostPort: 443
containerPort: 443
- name: admin
containerPort: 8081
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
volumeMounts:
- mountPath: "/config"
name: "config"
args:
- --api
- --configfile=/config/traefik.toml
- --kubernetes
- --logLevel=INFO
volumes:
- name: config
configMap:
name: traefik-config
items:
- key: traefik.toml
path: traefik.toml
nodeSelector:
# node must be labelled with role=ingress-controller
role: ingress-controller

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system

View file

@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
name: traefik-web-ui
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: http
- protocol: TCP
port: 443
name: https
- protocol: TCP
port: 8080
name: admin

View file

@ -83,23 +83,3 @@
- { 'name': 'heapster', 'type': 'deployment', 'file': '/tmp/heapster-deployment.yml' }
when: inventory_hostname == initial_master
- name: Removing manifest
file:
path: "/tmp/{{ item }}"
state: absent
with_items:
- dashboard-ingress.yml.j2
- dashboard-deployment.yml.j2
- dashboard-clusterrolebinding.yml
- dashboard-rolebinding.yml
- dashboard-role.yml
- dashboard-sa.yml
- dashboard-secret.yml
- dashboard-service.yml
- heapster-rolebinding.yml
- heapster-clusterrolebinding.yml
- heapster-role.yml
- heapster-sa.yml
- heapster-service.yml
- heapster-deployment.yml
when: inventory_hostname == initial_master

View file

@ -4,8 +4,7 @@ metadata:
namespace: kube-system
name: kubernetes-dashboard
annotations:
# enable kube-lego for this ingress
kubernetes.io/tls-acme: "true"
kubernetes.io/ingress.class: traefik
{% if basic_auth_user | length > 0 %}
ingress.kubernetes.io/auth-type: basic
# name of the secret that contains the user/password definitions
@ -16,10 +15,10 @@ ingress.kubernetes.io/auth-type: basic
spec:
# this enables tls for the specified domain names
tls:
- hosts:
- {{ dashboard_subdomain }}.{{ scaleway_reverse_ipaddr }}
secretName: dashboard-tls
# tls:
# - hosts:
# - {{ dashboard_subdomain }}.{{ scaleway_reverse_ipaddr }}
# secretName: dashboard-tls
rules:
- host: {{ dashboard_subdomain }}.{{ scaleway_reverse_ipaddr }}
http:

View file

@ -1,4 +0,0 @@
---
lego_email: deckard@chmod666.org
lego_image: jetstack/kube-lego
lego_version: 0.1.5

View file

@ -1,40 +0,0 @@
---
- name: kube_lego | Templating manifests
template:
src: "{{ item }}"
dest: "/tmp/{{ item | regex_replace('.j2', '') }}"
with_items:
- lego-sa.yml.j2
- lego-clusterolebinding.yml.j2
- lego-clusterole.yml.j2
- lego-configmap.yml.j2
- lego-controller.yml.j2
when: inventory_hostname == initial_master
- name: kube_lego | Deploying kube-lego
kube:
name: "{{ item.name }}"
resource: "{{ item.type }}"
filename: "{{ item.file }}"
state: latest
with_items:
- { 'name': 'kube-lego', 'type': 'sa', 'file': '/tmp/lego-sa.yml' }
- { 'name': 'kube-lego', 'type': 'clusterrolebingind', 'file': '/tmp/lego-clusterolebinding.yml' }
- { 'name': 'kube-lego', 'type': 'clusterrole', 'file': '/tmp/lego-clusterole.yml' }
- { 'name': 'kube-lego', 'type': 'configmap', 'file': '/tmp/lego-configmap.yml' }
- { 'name': 'kube-lego', 'type': 'deploy', 'file': '/tmp/lego-controller.yml' }
when: inventory_hostname == initial_master
- name: kube_lego | Removing manifest
file:
path: "/tmp/{{ item }}"
state: absent
with_items:
- lego-namespace.yml
- lego-sa.yml
- lego-clusterolebinding.yml
- lego-clusterole.yml
- lego-configmap.yml
- lego-controller.yml
when: inventory_hostname == initial_master

View file

@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: system:kube-lego
rules:
- apiGroups:
- ""
resources: ["configmaps","secrets","endpoints","events","services"]
verbs: ["list","watch","create","update","delete","get"]
- apiGroups:
- ""
- "extensions"
resources: ["services","nodes","ingresses","pods","ingresses/status"]
verbs: ["list","watch","create","update","delete","get"]

View file

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kube-lego
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-lego
subjects:
- kind: ServiceAccount
name: kube-lego
namespace: kube-system

View file

@ -1,10 +0,0 @@
apiVersion: v1
metadata:
name: kube-lego
namespace: kube-system
data:
# modify this to specify your address
lego.email: "{{ lego_email }}"
# configure letsencrypt's production api
lego.url: "https://acme-v01.api.letsencrypt.org/directory"
kind: ConfigMap

View file

@ -1,48 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-lego
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
app: kube-lego
spec:
serviceAccountName: kube-lego
containers:
- name: kube-lego
image: "{{ lego_image }}:{{ lego_version }}"
imagePullPolicy: Always
ports:
- containerPort: 8080
env:
- name: LEGO_EMAIL
valueFrom:
configMapKeyRef:
name: kube-lego
key: lego.email
- name: LEGO_URL
valueFrom:
configMapKeyRef:
name: kube-lego
key: lego.url
- name: LEGO_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LEGO_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
timeoutSeconds: 1
nodeSelector:
# node must be labelled with roles=ingress-controller
role: ingress-controller

View file

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-lego

View file

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-lego
namespace: kube-system