Mirror of https://github.com/phfroidmont/self-hosting.git (synced 2025-12-25 13:46:59 +01:00)

Install Kubernetes 1.11 and go back to Ubuntu Xenial until Bionic is officially supported
commit bf83e675f2 (parent 5acc7652a9)
26 changed files with 765 additions and 7 deletions

roles/kubernetes/tasks/cni.yml (new file, 40 lines)
---
# flannel deployment
- name: Checking if flannel exists
  shell: "ip link | awk '$2 ~ /^(flannel|cni)/ { print $0 }' | wc -l | awk '{ print $1 }'"
  register: cni_deployment
  changed_when: False
  check_mode: False

- block:
    - name: Determine physical interface to use with cni
      shell: "ip route get 169.254.42.42 | head -n1 | sed -E 's/.+ dev ([^ ]+).+/\\1/'"
      register: cni_interface
      changed_when: False
      check_mode: False
      failed_when: "cni_interface.stdout is not match('^[a-z][a-z0-9]+$')"

    - name: Create directories
      file:
        path: "{{ item }}"
        state: directory
        mode: 0755
      with_items:
        - /etc/kube-flannel
        - /etc/cni/net.d
        - /opt/cni/bin

    - template:
        src: kube-flannel.yml.j2
        dest: /tmp/kube-flannel.yml

    - name: Configure cni
      shell: "kubectl apply -f /tmp/kube-flannel.yml"
      register: cni_output
      # flannel has trouble unless we restart the kubelet service
      # we'll flush_handlers later
      notify: restart kubelet

    - debug: var="cni_output"
  when: "cni_deployment.stdout != '2'"
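
The template task above renders kube-flannel.yml.j2, which is not shown in this excerpt. As a rough illustration only, the net-conf ConfigMap that such a flannel manifest template typically carries could look like the following, reusing the pod_subnet variable that appears in kubeadm-master.yml (hypothetical sketch, not the template shipped in this commit):

# kube-flannel.yml.j2 (fragment): hypothetical sketch of the net-conf ConfigMap
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    app: flannel
data:
  net-conf.json: |
    {
      "Network": "{{ pod_subnet }}",
      "Backend": { "Type": "vxlan" }
    }

The guard when: "cni_deployment.stdout != '2'" skips the whole block once both the flannel and cni interfaces already exist on the node.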

roles/kubernetes/tasks/docker-images.yml (new file, 27 lines)
---
- name: Install python3-pip
  apt:
    name: python3-pip
    state: latest
    cache_valid_time: 3600
  register: result
  retries: 3
  until: result is success

- name: Install docker-compose
  pip:
    name: docker-compose

- name: Pull docker images
  docker_image: name="{{ item }}"
  with_items:
    - "k8s.gcr.io/kube-apiserver-{{ kube_arch }}:{{ kubernetes_version }}"
    - "k8s.gcr.io/kube-controller-manager-{{ kube_arch }}:{{ kubernetes_version }}"
    - "k8s.gcr.io/kube-proxy-{{ kube_arch }}:{{ kubernetes_version }}"
    - "k8s.gcr.io/kube-scheduler-{{ kube_arch }}:{{ kubernetes_version }}"
    - "k8s.gcr.io/pause-{{ kube_arch }}:3.1"
    - "quay.io/coreos/flannel:{{ cni_version }}-{{ kube_arch }}"

- name: Pull etcd if not multimaster
  docker_image: name="k8s.gcr.io/etcd-{{ kube_arch }}:{{ etcd_version }}"
  when: groups.k8s_masters | length == 1
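
The image list depends on kube_arch (derived in main.yml from ansible_architecture) together with kubernetes_version, cni_version and etcd_version, whose defaults are not visible in this diff. A hedged sketch of values consistent with the commit's Kubernetes 1.11 target; the exact pins below are assumptions, not taken from the repository:

# roles/kubernetes/defaults/main.yml: illustrative version pins only
kubernetes_version: v1.11.0   # 1.11 per the commit message; patch level assumed
etcd_version: 3.2.18          # etcd tag bundled with the 1.11 series (assumed)
cni_version: v0.10.0          # flannel image tag (assumed)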

roles/kubernetes/tasks/keepalived.yml (new file, 30 lines)
---
- name: Creating /etc/keepalived on master nodes
  file:
    path: /etc/keepalived
    state: directory

- name: Templating /etc/keepalived/keepalived.conf
  template:
    src: keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf

- name: Running keepalived container on master nodes
  docker_container:
    name: keepalived_api
    image: "chmod666/keepalived:latest"
    state: started
    detach: True
    volumes:
      - /etc/keepalived/keepalived.conf:/usr/local/etc/keepalived/keepalived.conf
    capabilities:
      - NET_ADMIN
    network_mode: host
    restart_policy: always

- name: Wait for keepalived to be started
  shell: 'docker ps | grep chmod666/keepalived | grep "Up"'
  register: result
  until: result.stdout.find("chmod666/keepalived") != -1
  retries: 18
  delay: 10
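
keepalived.conf.j2 is not part of this excerpt; the container simply announces a VRRP floating IP for the API server, which the rest of the role consumes as api_floating_ip and api_floating_port (see main.yml and kubeadm-worker.yml). A minimal group_vars sketch with purely illustrative values:

# group_vars/all.yml: illustrative values, not from the commit
api_floating_ip: 10.8.0.100    # virtual IP announced by the keepalived_api container
api_floating_port: 6443        # kube-apiserver port reached through the floating IP
initial_master: master-01      # inventory hostname of the bootstrap master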

roles/kubernetes/tasks/kubeadm-config.yml (new file, 29 lines)
---
# generating the kubeadm config file only on master nodes
- name: Creating kubeadm_config file
  template:
    src: kubeadm-config.j2
    dest: /tmp/kubeadm_config
  when:
    - groups.k8s_masters | length > 1
    - "'k8s_masters' in group_names"

# KUBELET_EXTRA_ARGS
- name: Additional configuration
  template:
    src: local-extras.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/90-local-extras.conf
    mode: 0640
  when:
    - "kubelet_fail_swap_on == False"
  notify:
    - reload systemd
    - restart kubelet

- meta: flush_handlers

- name: Creating .kube directory in $HOME
  file:
    path: ~/.kube
    state: directory
  when: "'k8s_masters' in group_names"
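
The kubeadm-config.j2 template itself is not shown. For the kubeadm 1.11 this commit targets, the config file is a v1alpha2 MasterConfiguration; a hedged sketch of what such a template could contain, reusing variables that appear elsewhere in the role (hypothetical, not the file from the repository):

# kubeadm-config.j2: hypothetical sketch for kubeadm 1.11 (v1alpha2)
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: "{{ kubernetes_version }}"
api:
  advertiseAddress: "{{ vpn_ip }}"
  controlPlaneEndpoint: "{{ api_floating_ip }}"
apiServerCertSANs:
  - "{{ api_floating_ip }}"
networking:
  podSubnet: "{{ pod_subnet }}"

The 90-local-extras.conf drop-in is only installed when kubelet_fail_swap_on is false, so it presumably passes --fail-swap-on=false to the kubelet via KUBELET_EXTRA_ARGS; the template is not visible here.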

roles/kubernetes/tasks/kubeadm-master.yml (new file, 58 lines)
---
# we get kubectl output to check if we have to add the node or not
# errors are ignored in case of the first initialization on the first master
- name: Getting kubectl output
  shell: "kubectl get nodes"
  register: kubectl_output
  changed_when: False
  check_mode: False
  failed_when: kubectl_output.rc != 0 and not 'did you specify the right host' in kubectl_output.stderr

- block:
    - name: Kubeadm init on first master in multimaster cluster
      shell: |
        kubeadm init \
          --config /tmp/kubeadm_config
          {%- if kubeadm_ignore_preflight_errors | length > 0 %} \
          --ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
          {% endif %}
      register: kubeadm_output
      failed_when: "'Your Kubernetes master has initialized successfully' not in kubeadm_output.stdout"
      when: groups.k8s_masters | length > 1

    - name: Kubeadm init on sole master
      shell: |
        kubeadm init \
          --apiserver-advertise-address={{ vpn_ip }} \
          --pod-network-cidr={{ pod_subnet }} \
          --kubernetes-version {{ kubernetes_version }}
          {%- if kubeadm_ignore_preflight_errors | length > 0 %} \
          --ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
          {% endif %}
      # flannel has trouble unless we restart the kubelet service
      notify: restart kubelet
      when:
        - groups.k8s_masters | length == 1
        - inventory_hostname == initial_master

    - name: Kubeadm output
      debug: var=kubeadm_output
  when: ansible_hostname not in kubectl_output.stdout

- name: Copying /etc/kubernetes/admin.conf to ~/.kube/config
  copy:
    src: /etc/kubernetes/admin.conf
    dest: ~/.kube/config
    remote_src: yes

- include: cni.yml

- name: Wait for master to be ready
  shell: "kubectl get nodes $(hostname) | tail -n+2 | awk '{ print $2 }'"
  register: result
  until: result.stdout.find("Ready") == 0
  retries: 36
  delay: 10

- meta: flush_handlers
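
Several tasks here and in cni.yml notify restart kubelet, and kubeadm-config.yml notifies reload systemd, but the role's handlers are outside this excerpt. A minimal sketch of handlers that would satisfy those notifications, assuming a systemd-managed kubelet (not the file from the commit):

# roles/kubernetes/handlers/main.yml: minimal sketch, assuming systemd
---
- name: reload systemd
  systemd:
    daemon_reload: yes

- name: restart kubelet
  systemd:
    name: kubelet
    state: restarted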

roles/kubernetes/tasks/kubeadm-multi.yml (new file, 39 lines)
---
- name: Distribute /etc/kubernetes/pki to other masters
  synchronize:
    src: /etc/kubernetes/pki/
    dest: /etc/kubernetes/pki/
    recursive: True
  delegate_to: "{{ initial_master }}"
  # forward ssh agent by preserving environment variables with sudo
  become_flags: "-E"

- block:
    - name: Initializing other masters
      shell: |
        kubeadm init \
          --config /tmp/kubeadm_config
          {%- if kubeadm_ignore_preflight_errors | length > 0 %} \
          --ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
          {% endif %}
      register: kubeadm_output
      failed_when: "'Your Kubernetes master has initialized successfully' not in kubeadm_output.stdout"

    - name: Kubeadm output
      debug: var=kubeadm_output
  when: ansible_hostname not in hostvars[initial_master]['kubectl_output'].stdout

# fixing kubectl
- name: kubectl config
  copy:
    src: /etc/kubernetes/kubelet.conf
    dest: /root/.kube/config
    remote_src: True

- name: Wait for master to be ready
  shell: "kubectl get nodes $(hostname) | tail -n+2 | awk '{ print $2 }'"
  register: result
  until: result.stdout.find("Ready") == 0
  retries: 36
  delay: 10
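
Because the synchronize task is delegated to the initial master, the rsync runs there and pushes /etc/kubernetes/pki to each additional master over SSH; become_flags: "-E" keeps the forwarded agent's environment alive through sudo. One way to enable the forwarding itself, shown only as an illustrative inventory variable rather than something taken from this commit:

# group_vars/all.yml: illustrative; forwards the controller's SSH agent to the nodes
ansible_ssh_common_args: "-o ForwardAgent=yes"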

roles/kubernetes/tasks/kubeadm-token.yml (new file, 26 lines)
---
- block:
    - name: Get an existing kubeadm join token
      shell: |
        kubeadm token list 2>/dev/null \
          | awk '$4 == "authentication,signing" { print $1 }' \
          | head -n1
      register: kubeadm_token_list
      changed_when: False
      check_mode: False
      failed_when: False

    - name: Generate a new kubeadm token
      shell: "kubeadm token create 2>/dev/null || kubeadm token generate"
      register: kubeadm_token_create
      when: kubeadm_token_list.stdout | length == 0

    - set_fact:
        kubeadm_token: |-
          {%- if kubeadm_token_list.stdout | length > 0 -%}
          {{ kubeadm_token_list.stdout }}
          {%- else -%}
          {{ kubeadm_token_create.stdout }}
          {%- endif -%}
  when: kubeadm_token|default('') | length == 0
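
The outer when means the whole block is skipped if kubeadm_token is already defined, so a pre-generated token can be pinned from the inventory instead of being looked up or created on the initial master. Illustrative only; the value below is a placeholder in kubeadm's six-plus-sixteen character token format:

# group_vars/all.yml: illustrative pinned join token (placeholder value)
kubeadm_token: "abcdef.0123456789abcdef"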

roles/kubernetes/tasks/kubeadm-worker.yml (new file, 31 lines)
---
- name: Checking if kube-proxy is Running
  shell: "ps -ef | grep [k]ube-proxy"
  register: kube_proxy_running
  ignore_errors: True

- block:
    - name: Joining cluster on other nodes
      shell: |
        kubeadm join \
          --token="{{ hostvars[initial_master].kubeadm_token }}" \
          {{ item.ipv4 }}:{{ item.port }} \
          {%- if kubeadm_ignore_preflight_errors | length > 0 %}
          --ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }} \
          {% endif %}
          --discovery-token-unsafe-skip-ca-verification
      register: kubeadm_output
      failed_when: "'This node has joined the cluster' not in kubeadm_output.stdout"
      when: item.when | bool == True
      with_items:
        - ipv4: "{{ api_floating_ip }}"
          port: "{{ api_floating_port }}"
          when: "{{ groups.k8s_masters | length > 1 }}"
        - ipv4: "{{ hostvars[initial_master].vpn_ip }}"
          port: 6443
          when: "{{ groups.k8s_masters | length == 1 }}"

    - name: Kubeadm output
      debug: var=kubeadm_output
  when: "'/usr/local/bin/kube-proxy' not in kube_proxy_running.stdout"
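
Which endpoint a worker joins is decided entirely by the size of the k8s_masters group: the keepalived floating IP and port for a multi-master cluster, or the sole master's vpn_ip on 6443 otherwise. An illustrative YAML inventory that would drive the multi-master branch; host names, addresses and the worker group name are made up (the role itself only keys on k8s_masters membership):

# inventory/hosts.yml: illustrative layout, not part of the commit
all:
  vars:
    initial_master: master-01
  children:
    k8s_masters:
      hosts:
        master-01: { vpn_ip: 10.8.0.1 }
        master-02: { vpn_ip: 10.8.0.2 }
        master-03: { vpn_ip: 10.8.0.3 }
    k8s_workers:
      hosts:
        worker-01: { vpn_ip: 10.8.0.11 }
        worker-02: { vpn_ip: 10.8.0.12 }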

roles/kubernetes/tasks/main.yml (new file, 75 lines)
---
- set_fact: kube_arch="{{ ansible_architecture | replace('x86_64', 'amd64') | replace('arm', 'armhf') }}"

- include: modules.yml

- include: keepalived.yml
  when:
    - "'k8s_masters' in group_names"
    - groups.k8s_masters | length > 1

# at this point everyone should be able to ping the api floating ip if multimaster
# so we test it here and don't continue until every host can ping the api ip

- name: Install iputils-ping
  apt:
    name: iputils-ping
    state: latest
    cache_valid_time: 3600
  register: result
  retries: 3
  until: result is success

- name: Check all hosts can ping API floating IP
  shell: "ping {{ api_floating_ip }} -c 1"
  register: result
  changed_when: no
  failed_when: ('100.0% packet loss' in result.stdout)

- include: packages.yml

- include: kubeadm-token.yml
  when: inventory_hostname == initial_master

- include: kubeadm-config.yml

# add masters
- block:
    # docker-in-docker sometimes hangs pulling images so explicitly do it here
    - include: docker-images.yml

    - include: kubeadm-master.yml
      when: inventory_hostname == initial_master

    # then we create the other masters
    - include: kubeadm-multi.yml
      when: inventory_hostname != initial_master
  when: "'k8s_masters' in group_names"

- name: Wait for coredns to be running
  shell: "kubectl get pods --namespace=kube-system | grep coredns | grep Running | wc -l"
  register: result
  until: result.stdout.find("2") != -1
  retries: 180
  delay: 10
  when: inventory_hostname == initial_master

- include: kubeadm-token.yml
  when: inventory_hostname == initial_master

# add non masters
- include: kubeadm-worker.yml
  when: "'k8s_masters' not in group_names"

# remove this wait and add a test to check all nodes are ready
- name: Wait for all nodes to be ready
  shell: "kubectl get nodes {{ ansible_hostname }} | tail -n+2 | awk '{ print $2 }'"
  register: result
  until: result.stdout.find("Ready") == 0
  retries: 36
  delay: 10
  delegate_to: "{{ initial_master }}"

- debug: var=result
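
main.yml expects to run on every node of the cluster, with masters and workers distinguished only by group membership and initial_master selecting the node that bootstraps the cluster and hands out the join token. A minimal playbook sketch for invoking the role (the file name and privilege escalation are assumptions, not taken from the commit):

# site.yml: illustrative invocation of the kubernetes role
---
- hosts: all
  become: yes
  roles:
    - kubernetes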

roles/kubernetes/tasks/modules.yml (new file, 30 lines)
---
- name: Get the kernel revision
  shell: "uname -r"
  register: kernel
  changed_when: False
  check_mode: False

# allow failure as the package may not exist
- name: Try install linux-image
  apt:
    state: present
    name: "{{ 'linux-image-' + kernel.stdout }}"
  register: result
  failed_when: False

- name: modprobe
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - ip_vs
    - nf_conntrack_ipv4

- name: /etc/modules
  lineinfile:
    path: /etc/modules
    line: "{{ item }}"
  with_items:
    - ip_vs
    - nf_conntrack_ipv4
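
modules.yml loads ip_vs and nf_conntrack_ipv4 immediately with modprobe and persists them through /etc/modules so they come back after a reboot. A hedged follow-up check, not part of the commit, that would fail early if either module did not actually load:

# illustrative verification task, not part of the commit
- name: Verify the IPVS modules are loaded
  shell: "lsmod | awk '{ print $1 }' | grep -c -E '^(ip_vs|nf_conntrack_ipv4)$'"
  register: ipvs_modules
  changed_when: False
  failed_when: ipvs_modules.stdout | int < 2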

roles/kubernetes/tasks/packages.yml (new file, 19 lines)
---
- name: Adding Kubernetes official gpg key
  apt_key:
    url: "{{ kubernetes_apt_key }}"
    state: present

- name: Adding Kubernetes repository
  apt_repository:
    repo: "deb http://apt.kubernetes.io/ kubernetes-{{ kubernetes_release }} {{ kubernetes_apt_channel }}"
    state: present
    filename: 'kubernetes'

- name: Installing kubernetes core components (kubectl, kubelet ...)
  apt:
    name: ['kubelet', 'kubeadm', 'kubectl', 'kubernetes-cni']
    state: latest
  register: result
  retries: 3
  until: result is success
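
The repository tasks rely on kubernetes_apt_key, kubernetes_release and kubernetes_apt_channel, whose defaults are outside this excerpt. Plausible values for the Xenial target named in the commit message, given as an illustration rather than taken from the repository:

# roles/kubernetes/defaults/main.yml: illustrative repository settings
kubernetes_apt_key: "https://packages.cloud.google.com/apt/doc/apt-key.gpg"
kubernetes_release: xenial       # yields the kubernetes-xenial suite on apt.kubernetes.io
kubernetes_apt_channel: main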