Install Kubernetes 1.11 and go back to Ubuntu Xenial until Bionic is officially supported

Paul-Henri Froidmont 2018-09-25 17:07:38 +02:00
parent 5acc7652a9
commit bf83e675f2
26 changed files with 765 additions and 7 deletions


@ -6,3 +6,8 @@ host_key_checking = False
nocows = 1
remote_user = root
retry_files_enabled = False
[ssh_connection]
control_path = /tmp/ansible-ssh-%%h-%%p-%%r
pipelining = True
ssh_args = -C -o ControlMaster=auto -o ControlPersist=5m -o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null


@ -11,5 +11,5 @@ harden_linux_ufw_rules:
- rule: "allow"
to_port: "7000"
protocol: "udp"
docker_version: 18.06.*
docker_version: 17.03.*


@ -6,9 +6,17 @@
- role: docker
tags: docker
- hosts: k8s_masters
gather_facts: false
roles:
- role: etcd
tags: etcd
- hosts: k8s_proxy:k8s_masters:k8s_workers
gather_facts: false
roles:
- role: kubernetes
tags: kubernetes
#- hosts: localhost
# become: yes
# gather_facts: no
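
The plays above assume an inventory that defines the k8s_proxy, k8s_masters and k8s_workers groups; a minimal sketch of such an inventory in Ansible YAML form (host names are illustrative, not taken from this commit):

all:
  children:
    k8s_proxy:
      hosts:
        proxy1:
        proxy2:
    k8s_masters:
      hosts:
        master1:
    k8s_workers:
      hosts:
        worker1:
        worker2: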


@ -1,11 +1,11 @@
---
- name: etcd replicated and outside of kubeadm when running multiple masters
block:
- name: Install python3-pip
apt:
name: python3-pip
state: latest
cache_valid_time: 3600
register: result
retries: 3
until: result is success


@ -0,0 +1,24 @@
---
# Kubernetes
kubernetes_apt_key: https://packages.cloud.google.com/apt/doc/apt-key.gpg
kubernetes_apt_channel: main
kubernetes_release: xenial
# available versions can be found in the Kubernetes apt repository
kubernetes_version: v1.11.3
kubernetes_port: 6443
# kubeadm
kubeadm_ignore_preflight_errors: ""
kubelet_fail_swap_on: True
# Flannel
cni_version: v0.10.0
# this determines the number of pods you can run
# the CIDR should be at least a /16, see https://kubernetes.io/docs/admin/kubeadm/
pod_subnet: 10.244.0.0/16
# floating load-balanced IP for the k8s API
api_floating_ip: 192.168.66.253
api_floating_mask: 24
api_floating_port: 6443
router_id: 66
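
These defaults can be overridden per group or host. A hypothetical host_vars override for a node that keeps swap enabled might look like the sketch below; the variable names come from the defaults above, while the "Swap" preflight check name is an assumption about kubeadm, not something taken from this commit:

# host_vars/worker1.yml (hypothetical)
kubelet_fail_swap_on: False
kubeadm_ignore_preflight_errors: "Swap"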


@ -0,0 +1,6 @@
---
- name: reload systemd
command: systemctl daemon-reload
- name: restart kubelet
command: systemctl restart kubelet
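
The handlers above shell out to systemctl; an equivalent sketch using Ansible's systemd module (assuming Ansible 2.4 or newer) would be:

- name: reload systemd
  systemd:
    daemon_reload: yes

- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted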


@ -0,0 +1,40 @@
---
# flannel deployment
- name: Checking if flannel exists
shell: "ip link | awk '$2 ~ /^(flannel|cni)/ { print $0 }' | wc -l | awk '{ print $1 }'"
register: cni_deployment
changed_when: False
check_mode: False
- block:
- name: Determine physical interface to use with cni
shell: "ip route get 169.254.42.42 | head -n1 | sed -E 's/.+ dev ([^ ]+).+/\\1/'"
register: cni_interface
changed_when: False
check_mode: False
failed_when: "cni_interface.stdout is not match('^[a-z][a-z0-9]+$')"
- name: Create directories
file:
path: "{{ item }}"
state: directory
mode: 0755
with_items:
- /etc/kube-flannel
- /etc/cni/net.d
- /opt/cni/bin
- name: Template kube-flannel.yml
template:
src: kube-flannel.yml.j2
dest: /tmp/kube-flannel.yml
- name: Configure cni
shell: "kubectl apply -f /tmp/kube-flannel.yml"
register: cni_output
# flannel has trouble unless we restart the kubelet service
# we'll flush_handlers later
notify: restart kubelet
- debug: var="cni_output"
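# run the whole block only when the flannel and cni interfaces are not both present yet (interface count != 2)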
when: "cni_deployment.stdout != '2'"


@ -0,0 +1,27 @@
---
- name: Install python3-pip
apt:
name: python3-pip
state: latest
cache_valid_time: 3600
register: result
retries: 3
until: result is success
- name: Install docker-compose
pip:
name: docker-compose
- name: Pull docker images
docker_image: name="{{ item }}"
with_items:
- "k8s.gcr.io/kube-apiserver-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/kube-controller-manager-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/kube-proxy-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/kube-scheduler-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/pause-{{ kube_arch }}:3.1"
- "quay.io/coreos/flannel:{{ cni_version }}-{{ kube_arch }}"
- name: Pull etcd if not multimaster
docker_image: name="k8s.gcr.io/etcd-{{ kube_arch }}:{{ etcd_version }}"
when: groups.k8s_masters | length == 1
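
With kube_arch resolved to amd64 by the set_fact in main.yml further down and the default kubernetes_version of v1.11.3, the first item above expands to k8s.gcr.io/kube-apiserver-amd64:v1.11.3; etcd_version is assumed to be supplied elsewhere, for example by the etcd role.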


@ -0,0 +1,30 @@
---
- name: Creating /etc/keepalived on master nodes
file:
path: /etc/keepalived
state: directory
- name: Templating /etc/keepalived/keepalived.conf
template:
src: keepalived.conf.j2
dest: /etc/keepalived/keepalived.conf
- name: Running keepalived container on master nodes
docker_container:
name: keepalived_api
image: "chmod666/keepalived:latest"
state: started
detach: True
volumes:
- /etc/keepalived/keepalived.conf:/usr/local/etc/keepalived/keepalived.conf
capabilities:
- NET_ADMIN
network_mode: host
restart_policy: always
- name: Wait for keepalived to be started
shell: 'docker ps | grep chmod666/keepalived | grep "Up"'
register: result
until: result.stdout.find("chmod666/keepalived") != -1
retries: 18
delay: 10
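
An optional sanity check, not included in this commit, could verify that the floating IP is actually bound on the elected MASTER before continuing:

- name: Check whether the API floating IP is assigned locally
  shell: "ip -o addr show | grep -w {{ api_floating_ip }}"
  register: vip_local
  changed_when: False
  failed_when: False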


@ -0,0 +1,29 @@
---
# generating the kubeadm config file only on master nodes
- name: Creating kubeadm_config file
template:
src: kubeadm-config.j2
dest: /tmp/kubeadm_config
when:
- groups.k8s_masters | length > 1
- "'k8s_masters' in group_names"
# KUBELET_EXTRA_ARGS
- name: Additional configuration
template:
src: local-extras.conf.j2
dest: /etc/systemd/system/kubelet.service.d/90-local-extras.conf
mode: 0640
when:
- "kubelet_fail_swap_on == False"
notify:
- reload systemd
- restart kubelet
- meta: flush_handlers
- name: Creating .kube directory in $HOME
file:
path: ~/.kube
state: directory
when: "'k8s_masters' in group_names"


@ -0,0 +1,58 @@
---
# we read the kubectl output to check whether the node still has to be added
# errors are ignored when the first master has not been initialized yet
- name: Getting kubectl output
shell: "kubectl get nodes"
register: kubectl_output
changed_when: False
check_mode: False
failed_when: kubectl_output.rc != 0 and not 'did you specify the right host' in kubectl_output.stderr
- block:
- name: Kubeadm init on first master in multimaster cluster
shell: |
kubeadm init \
--config /tmp/kubeadm_config
{%- if kubeadm_ignore_preflight_errors | length > 0 %} \
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
{% endif %}
register: kubeadm_output
failed_when: "'Your Kubernetes master has initialized successfully' not in kubeadm_output.stdout"
when: groups.k8s_masters | length > 1
- name: Kubeadm init on sole master
shell: |
kubeadm init \
--apiserver-advertise-address={{ vpn_ip }} \
--pod-network-cidr={{ pod_subnet }} \
--kubernetes-version {{ kubernetes_version }}
{%- if kubeadm_ignore_preflight_errors | length > 0 %} \
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
{% endif %}
register: kubeadm_output
failed_when: "'Your Kubernetes master has initialized successfully' not in kubeadm_output.stdout"
# flannel has trouble unless we restart the kubelet service
notify: restart kubelet
when:
- groups.k8s_masters | length == 1
- inventory_hostname == initial_master
- name: Kubeadm output
debug: var=kubeadm_output
when: ansible_hostname not in kubectl_output.stdout
- name: Copying /etc/kubernetes/admin.conf to ~/.kube/config
copy:
src: /etc/kubernetes/admin.conf
dest: ~/.kube/config
remote_src: yes
- include: cni.yml
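# the awk output is the node STATUS column; find("Ready") == 0 matches only when the column starts with Ready, so NotReady does not pass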
- name: Wait for master to be ready
shell: "kubectl get nodes $(hostname) | tail -n+2 | awk '{ print $2 }'"
register: result
until: result.stdout.find("Ready") == 0
retries: 36
delay: 10
- meta: flush_handlers


@ -0,0 +1,39 @@
---
- name: Distribute /etc/kubernetes/pki to other masters
synchronize:
src: /etc/kubernetes/pki/
dest: /etc/kubernetes/pki/
recursive: True
delegate_to: "{{ initial_master }}"
# forward ssh agent by preserving environment variables with sudo
become_flags: "-E"
- block:
- name: Initializing other masters
shell: |
kubeadm init \
--config /tmp/kubeadm_config
{%- if kubeadm_ignore_preflight_errors | length > 0 %} \
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
{% endif %}
register: kubeadm_output
failed_when: "'Your Kubernetes master has initialized successfully' not in kubeadm_output.stdout"
- name: Kubeadm output
debug: var=kubeadm_output
when: ansible_hostname not in hostvars[initial_master]['kubectl_output'].stdout
# make kubectl usable on this master by copying the kubelet kubeconfig
- name: kubectl config
copy:
src: /etc/kubernetes/kubelet.conf
dest: /root/.kube/config
remote_src: True
- name: Wait for master to be ready
shell: "kubectl get nodes $(hostname) | tail -n+2 | awk '{ print $2 }'"
register: result
until: result.stdout.find("Ready") == 0
retries: 36
delay: 10


@ -0,0 +1,26 @@
---
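# reuse an existing join token that still allows authentication and signing; only create a new one when none is found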
- block:
- name: Get an existing kubeadm join token
shell: |
kubeadm token list 2>/dev/null \
| awk '$4 == "authentication,signing" { print $1 }' \
| head -n1
register: kubeadm_token_list
changed_when: False
check_mode: False
failed_when: False
- name: Generate a new kubeadm token
shell: "kubeadm token create 2>/dev/null || kubeadm token generate"
register: kubeadm_token_create
when: kubeadm_token_list.stdout | length == 0
- set_fact:
kubeadm_token: |-
{%- if kubeadm_token_list.stdout | length > 0 -%}
{{ kubeadm_token_list.stdout }}
{%- else -%}
{{ kubeadm_token_create.stdout }}
{%- endif -%}
when: kubeadm_token|default('') | length == 0


@ -0,0 +1,31 @@
---
- name: Checking if kube-proxy is Running
shell: "ps -ef | grep [k]ube-proxy"
register: kube_proxy_running
ignore_errors: True
- block:
- name: Joining cluster on other nodes
shell: |
kubeadm join \
--token="{{ hostvars[initial_master].kubeadm_token }}" \
{{ item.ipv4 }}:{{ item.port }} \
{%- if kubeadm_ignore_preflight_errors | length > 0 %}
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }} \
{% endif %}
--discovery-token-unsafe-skip-ca-verification
register: kubeadm_output
failed_when: "'This node has joined the cluster' not in kubeadm_output.stdout"
when: item.when | bool == True
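# each item carries its own 'when' flag: join via the floating IP on a multi-master cluster, otherwise join the initial master directly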
with_items:
- ipv4: "{{ api_floating_ip }}"
port: "{{ api_floating_port }}"
when: "{{ groups.k8s_masters | length > 1 }}"
- ipv4: "{{ hostvars[initial_master].vpn_ip }}"
port: 6443
when: "{{ groups.k8s_masters | length == 1 }}"
- name: Kubeadm output
debug: var=kubeadm_output
when: "'/usr/local/bin/kube-proxy' not in kube_proxy_running.stdout"


@ -0,0 +1,75 @@
---
- set_fact: kube_arch="{{ ansible_architecture | replace('x86_64', 'amd64') | replace('arm', 'armhf') }}"
- include: modules.yml
- include: keepalived.yml
when:
- "'k8s_masters' in group_names"
- groups.k8s_masters | length > 1
# at this point every host should be able to ping the API floating IP in a multi-master setup
# add a test here and do not continue until every host can ping the API IP
- name: Install iputils-ping
apt:
name: iputils-ping
state: latest
cache_valid_time: 3600
register: result
retries: 3
until: result is success
- name: Check all hosts can ping API floating IP
shell: "ping {{ api_floating_ip }} -c 1"
register: result
changed_when: no
failed_when: ('100.0% packet loss' in result.stdout)
- include: packages.yml
- include: kubeadm-token.yml
when: inventory_hostname == initial_master
- include: kubeadm-config.yml
# add masters
- block:
# docker-in-docker sometimes hangs pulling images so explicitly do it here
- include: docker-images.yml
- include: kubeadm-master.yml
when: inventory_hostname == initial_master
# then we create the other masters
- include: kubeadm-multi.yml
when: inventory_hostname != initial_master
when: "'k8s_masters' in group_names"
- name: Wait for coredns to be running
shell: "kubectl get pods --namespace=kube-system | grep coredns | grep Running | wc -l"
register: result
until: result.stdout.find("2") != -1
retries: 180
delay: 10
when: inventory_hostname == initial_master
- include: kubeadm-token.yml
when: inventory_hostname == initial_master
# add non-masters
- include: kubeadm-worker.yml
when: "'k8s_masters' not in group_names"
# remove this wait and add a test to check all nodes are ready
- name: Wait for all nodes to be ready
shell: "kubectl get nodes {{ ansible_hostname }} | tail -n+2 | awk '{ print $2 }'"
register: result
until: result.stdout.find("Ready") == 0
retries: 36
delay: 10
delegate_to: "{{ initial_master }}"
- debug: var=result


@ -0,0 +1,30 @@
---
- name: Get the kernel revision
shell: "uname -r"
register: kernel
changed_when: False
check_mode: False
# allow failure as the package may not exist
- name: Try to install linux-image for the running kernel
apt:
state: present
name: "{{ 'linux-image-' + kernel.stdout }}"
register: result
failed_when: False
- name: modprobe
modprobe:
name: "{{ item }}"
state: present
with_items:
- ip_vs
- nf_conntrack_ipv4
- name: /etc/modules
lineinfile:
path: /etc/modules
line: "{{ item }}"
with_items:
- ip_vs
- nf_conntrack_ipv4


@ -0,0 +1,19 @@
---
- name: Adding Kubernetes official gpg key
apt_key:
url: "{{ kubernetes_apt_key }}"
state: present
- name: Adding Kubernetes repository
apt_repository:
repo: "deb http://apt.kubernetes.io/ kubernetes-{{ kubernetes_release }} {{ kubernetes_apt_channel }}"
state: present
filename: 'kubernetes'
- name: Installing kubernetes core components (kubectl, kubelet ...)
apt:
name: ['kubelet', 'kubeadm', 'kubectl', 'kubernetes-cni']
state: latest
register: result
retries: 3
until: result is success
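
Because state: latest follows the newest packages in the repository, the installed kubelet can drift ahead of kubernetes_version; a possible variant pins the packages explicitly (the -00 Debian revision is an assumption about the repository's package naming, not something this commit uses):

- name: Installing kubernetes core components pinned to kubernetes_version
  apt:
    name:
      - "kubelet={{ kubernetes_version | regex_replace('^v', '') }}-00"
      - "kubeadm={{ kubernetes_version | regex_replace('^v', '') }}-00"
      - "kubectl={{ kubernetes_version | regex_replace('^v', '') }}-00"
    state: present
  register: result
  retries: 3
  until: result is success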


@ -0,0 +1,69 @@
global_defs {
{% if ansible_tun0 is defined %}
default_interface tun0
{% else %}
default_interface eth0
{% endif %}
}
vrrp_instance VI_1 {
{% if ansible_tun0 is defined %}
interface tun0
{% else %}
interface eth0
{% endif %}
track_interface {
{% if ansible_tun0 is defined %}
tun0
{% else %}
eth0
{% endif %}
}
{% if inventory_hostname == initial_master %}
state MASTER
priority 100
{% else %}
state BACKUP
priority 50
{% endif %}
virtual_router_id {{ router_id }}
nopreempt
unicast_peer {
{% for host in groups['k8s_masters'] %}
{{ hostvars[host]['vpn_ip'] }}
{% endfor %}
}
virtual_ipaddress {
{{ api_floating_ip }}/{{ api_floating_mask }}
}
authentication {
auth_type PASS
auth_pass d0cker
}
notify "/container/service/keepalived/assets/notify.sh"
}
virtual_server {{ api_floating_ip }} {{ api_floating_port }} {
delay_loop 10
protocol TCP
lb_algo rr
# Use direct routing
lb_kind DR
persistence_timeout 7200
{% for host in groups['k8s_masters'] %}
real_server {{ hostvars[host]['vpn_ip'] }} {{ api_floating_port }} {
weight 1
TCP_CHECK {
connect_timeout 5
connect_port 6443
}
}
{% endfor %}
}


@ -0,0 +1,36 @@
apiVersion: v1
kind: Pod
metadata:
name: keepalived
namespace: kube-system
spec:
hostNetwork: true
volumes:
- hostPath:
path: /etc/keepalived/keepalived.conf
type: File
name: keepalived-config
containers:
- name: keepalived
image: chmod666/keepalived:latest
# if the tag is latest, imagePullPolicy defaults to Always
# but when keepalived is in the backup state, the proxy may have no connection to the internet
# to avoid keepalived not starting in that case, we set imagePullPolicy: IfNotPresent,
# assuming the image was already pulled at cluster creation. Neat.
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: "/usr/local/etc/keepalived/keepalived.conf"
name: keepalived-config
securityContext:
capabilities:
add:
- NET_ADMIN
#env:
# - name: KEEPALIVED_INTERFACE
# value: tun0
# - name: KEEPALIVED_UNICAST_PEERS
# value: "#PYTHON2BASH:['{{ groups['masters'] | map('extract', hostvars, ['vpn_ip']) | join("', '") }}']"
# - name: KEEPALIVED_VIRTUAL_IPS
# value: "#PYTHON2BASH:['{{ api_floating_ip }}/{{ api_floating_mask }}']"
# - name: KEEPALIVED_PRIORITY
# value: "{{ groups['masters'].index(inventory_hostname) + 1 }}"


@ -0,0 +1,160 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
template:
metadata:
labels:
tier: node
app: flannel
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: {{ kube_arch }}
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:{{ cni_version }}-{{ kube_arch }}
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:{{ cni_version }}-{{ kube_arch }}
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
- --iface={{ cni_interface.stdout | trim }}
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg


@ -0,0 +1,27 @@
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
api:
advertiseAddress: {{ api_floating_ip if groups.k8s_masters | length > 1 else hostvars[initial_master].vpn_ip }}
etcd:
external:
endpoints:
{% for host in groups['k8s_masters'] %}
- "http://{{ hostvars[host]['vpn_ip'] }}:2379"
{% endfor %}
networking:
podSubnet: "{{ pod_subnet }}"
kubernetesVersion: "{{ kubernetes_version }}"
apiServerCertSANs:
{% for host in groups['k8s_masters'] %}
- "{{ hostvars[host]['vpn_ip'] }}"
{% endfor %}
- "{{ api_floating_ip }}"
- "127.0.0.1"
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: "{{ hostvars[initial_master].kubeadm_token }}"
ttl: 0s
usages:
- signing
- authentication
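
For reference, with two masters whose vpn_ip values are hypothetically 10.0.0.1 and 10.0.0.2 and the default api_floating_ip, the template above would render roughly as:

apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
api:
  advertiseAddress: 192.168.66.253
etcd:
  external:
    endpoints:
    - "http://10.0.0.1:2379"
    - "http://10.0.0.2:2379"
networking:
  podSubnet: "10.244.0.0/16"
kubernetesVersion: "v1.11.3"
apiServerCertSANs:
- "10.0.0.1"
- "10.0.0.2"
- "192.168.66.253"
- "127.0.0.1"
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: "<token generated by kubeadm-token.yml>"
  ttl: 0s
  usages:
  - signing
  - authentication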


@ -0,0 +1,2 @@
[Service]
Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false"


@ -10,6 +10,13 @@
recurse: True
state: directory
- name: create /etc/tinc/nets.boot file from template
template:
src: nets.boot.j2
dest: /etc/tinc/nets.boot
notify:
- restart tinc
- name: ensure tinc.conf contains connection to all other nodes
template:
src: tinc.conf.j2
@ -117,7 +124,7 @@
- name: start tinc on boot
systemd:
name: "tinc@{{netname}}"
name: tinc
enabled: yes
state: started


@ -0,0 +1 @@
{{ netname }}


@ -7,6 +7,11 @@ data "scaleway_image" "ubuntu" {
name = "${var.image}"
}
data "scaleway_image" "ubuntu_mini" {
architecture = "${var.architecture}"
name = "${var.mini_image}"
}
//resource "scaleway_ip" "public_ip" {
// count = 1
//}
@ -37,7 +42,7 @@ resource "scaleway_server" "master" {
resource "scaleway_server" "proxy1" {
count = 1
name = "proxy1"
image = "${data.scaleway_image.ubuntu.id}"
image = "${data.scaleway_image.ubuntu_mini.id}"
type = "${var.proxy_instance_type}"
public_ip = "51.158.77.6"
state = "running"
@ -47,7 +52,7 @@ resource "scaleway_server" "proxy1" {
resource "scaleway_server" "proxy2" {
count = 1
name = "proxy2"
image = "${data.scaleway_image.ubuntu.id}"
image = "${data.scaleway_image.ubuntu_mini.id}"
type = "${var.proxy_instance_type}"
state = "running"
tags = ["k8s","k8s_proxy","secondary"]


@ -7,7 +7,11 @@ variable "architecture" {
}
variable "image" {
default = "Ubuntu Bionic"
default = "Ubuntu Xenial"
}
variable "mini_image" {
default = "Ubuntu Mini Xenial 25G"
}
variable "master_instance_type" {
@ -19,7 +23,7 @@ variable "master_instance_count" {
}
variable "proxy_instance_type" {
default = "START1-S"
default = "START1-XS"
}
variable "worker_instance_type" {