Cleanup and reorganize vars by environment

Paul-Henri Froidmont 2019-04-09 02:43:15 +02:00
parent 51fb76ffb6
commit f34742ddea
38 changed files with 43 additions and 925 deletions

@@ -1,7 +0,0 @@
traefik_serviceaccount: |
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system

@@ -1,101 +0,0 @@
traefik_daemonset: |
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- image: traefik:v1.7-alpine
name: traefik-ingress-lb
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 2
httpGet:
path: /ping
port: 8080
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
failureThreshold: 2
httpGet:
path: /ping
port: 8080
scheme: HTTP
periodSeconds: 5
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "64Mi"
cpu: "250m"
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
securityContext:
privileged: true
volumeMounts:
- name: tls
mountPath: {{k8s_conf_dir}}
readOnly: true
args:
- --checknewversion=false
- --loglevel=INFO
- --defaultentrypoints=http,https
- --entrypoints=Name:http Address::80 Redirect.EntryPoint:https
- --entrypoints=Name:https Address::443 TLS
- --etcd=true
- --etcd.prefix=/traefik
- --etcd.watch=true
- --etcd.endpoint={{groups.k8s_etcd|first}}:2379
- --etcd.tls=true
- --etcd.tls.ca={{k8s_conf_dir}}/ca-etcd.pem
- --etcd.tls.cert={{k8s_conf_dir}}/cert-etcd.pem
- --etcd.tls.key={{k8s_conf_dir}}/cert-etcd-key.pem
- --etcd.useapiv3=true
- --kubernetes=true
- --kubernetes.watch=true
- --kubernetes.namespaces=default
- --web=true
- --web.readonly
- --web.address=:8080
- --acme=true
- --acme.acmelogging=true
- --acme.caserver=https://acme-staging.api.letsencrypt.org/directory
- --acme.entrypoint=https
- --acme.httpchallenge=true
- --acme.httpChallenge.entryPoint=http
- --acme.email=letsencrypt.account@banditlair.com
- --acme.onhostrule
- --acme.storage=/traefik/acme/account
volumes:
- name: tls
secret:
secretName: traefik-etcd
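
Note: the etcd endpoint flag above is rendered from the inventory; a minimal sketch of how that one flag expands, assuming a hypothetical first host named etcd1 in the k8s_etcd group:

  --etcd.endpoint={{groups.k8s_etcd|first}}:2379   ->   --etcd.endpoint=etcd1:2379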

@@ -1,40 +0,0 @@
traefik_clusterrole: |
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
traefik_clusterrolebinding: |
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

@@ -1,6 +1,8 @@
---
ansible_python_interpreter: /usr/bin/python3
kubeadm_enabled: true
harden_linux_root_password: "{{k8s_scaleway_root_password}}"
harden_linux_deploy_user: deploy
harden_linux_deploy_user_password: "{{k8s_scaleway_deploy_user_password}}"
@@ -23,4 +25,3 @@ harden_linux_sshguard_whitelist:
- "212.83.165.111"
- "10.3.0.0/24"
- "10.200.0.0/16"

@@ -1,19 +0,0 @@
---
ansible_user: root
ansible_port: 22
dashboard_subdomain: dashboard
scaleway_ipaddr: 51.158.77.6
scaleway_reverse_ipaddr: k8s.banditlair.com
harden_linux_sshd_settings_user:
"^Port ": "Port 22"
harden_linux_ufw_rules:
- rule: "allow"
to_port: "22"
protocol: "tcp"
- rule: "allow"
to_port: "7000"
protocol: "udp"
docker_version: 17.03.*

@@ -1,10 +1,11 @@
---
kubeadm_enabled: true
ip: "{{vpn_ip}}"
kube_network_plugin: flannel
bin_dir: /usr/local/bin
kube_api_anonymous_auth: true
skip_non_kubeadm_warning: false
helm_enabled: true
ingress_nginx_enabled: true
ingress_nginx_host_network: true
ingress_nginx_nodeselector:
node-role.kubernetes.io/node: ""
cert_manager_enabled: true
cert_manager_enabled: true

@@ -1,2 +0,0 @@
---
vpn_ip: 192.168.66.{{ 10 +( inventory_hostname|regex_replace('\D+','')|int) }}

@@ -1,3 +0,0 @@
---
vpn_ip: 192.168.66.{{ 0 +(inventory_hostname|regex_replace('\D+','')|int) }}
keepalived_ip: "192.168.66.254"

@@ -1,2 +0,0 @@
---
vpn_ip: 192.168.66.{{ 100 +( inventory_hostname|regex_replace('\D+','')|int) }}
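
Note: the three vpn_ip expressions above differ only in their base offset (10, 0 and 100). A minimal worked example, assuming a hypothetical host named prod-node3:

  inventory_hostname                               -> prod-node3
  inventory_hostname|regex_replace('\D+','')|int   -> 3
  192.168.66.{{ 100 + 3 }}                         -> 192.168.66.103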

@@ -0,0 +1,3 @@
---
cluster_name: banditlair
dns_domain: banditlair.com

inventories/prod/groups (new file)
@@ -0,0 +1,17 @@
[prod-master]
[prod-etcd]
[prod-node]
[kube-master:children]
prod-master
[etcd:children]
prod-etcd
[kube-node:children]
prod-node
[k8s-cluster:children]
kube-master
etcd
kube-node

@@ -0,0 +1,12 @@
plugin: scaleway
hostnames:
- hostname
regions:
- par1
- ams1
tags:
- prod-master
- prod-etcd
- prod-node
variables:
ansible_host: public_ip.address
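
Note: the resulting dynamic inventory can be inspected without touching any host, for example as below. The file path is illustrative, Scaleway API credentials must be available to the plugin, and the plugin generally expects the file name to end in scaleway.yml:

  ansible-inventory -i inventories/prod/prod.scaleway.yml --graph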

@@ -1,6 +1,3 @@
---
ip: "{{vpn_ip}}"
kube_network_plugin: flannel
bin_dir: /usr/local/bin
cluster_name: banditlair-staging
dns_domain: staging.k8s.banditlair.com
cluster_name: banditlair-test
dns_domain: test.k8s.banditlair.com

@@ -5,7 +5,7 @@
tags: tinc
- name: Include kubespray tasks
import_playbook: kubespray/cluster.yml
import_playbook: kubespray.yml
# - hosts: k8s_proxy:k8s_masters:k8s_workers
# roles:

kubespray.yml (new symbolic link)
@@ -0,0 +1 @@
kubespray/cluster.yml

@@ -1,13 +0,0 @@
etcd_name: "etcd_{{ ansible_hostname }}"
#etcd_advertise_client_urls: "http://{{ ansible_hostname }}:2379"
etcd_advertise_client_urls: "http://{{ vpn_ip }}:2379"
#etcd_listen_client_urls: 'http://{{ ansible_hostname }}:2379,http://127.0.0.1:2379'
etcd_listen_client_urls: 'http://{{ vpn_ip }}:2379,http://127.0.0.1:2379'
#etcd_initial_advertise_peer_urls: "http://{{ ansible_hostname }}:2380"
etcd_initial_advertise_peer_urls: "http://{{ vpn_ip }}:2380"
#etcd_listen_peer_urls: "http://{{ ansible_hostname }}:2380"
etcd_listen_peer_urls: "http://{{ vpn_ip }}:2380"
# Build the initial cluster member list using a template lookup
etcd_initial_cluster: "{{ lookup('template', 'templates/initial_cluster.j2') | replace('\n', '')}}"
etcd_initial_cluster_token: "token_{{ hostvars[initial_master]['ansible_hostname'] }}"
etcd_initial_cluster_state: "new"

@@ -1,29 +0,0 @@
---
- name: etcd replicated and outside of kubeadm when multi-master
block:
- name: Running etcd container on masters nodes
docker_container:
name: etcd
image: "quay.io/coreos/etcd:v{{etcd_version}}"
state: started
detach: True
ports:
- "0.0.0.0:2380:2380"
- "0.0.0.0:2379:2379"
command: [
"etcd",
"--name {{ etcd_name }}",
"--initial-advertise-peer-urls {{ etcd_initial_advertise_peer_urls }}",
"--listen-peer-urls {{ etcd_listen_peer_urls }}",
"--advertise-client-urls {{ etcd_advertise_client_urls }}",
"--listen-client-urls {{ etcd_listen_client_urls }}",
"--initial-cluster {{ etcd_initial_cluster }}",
"--initial-cluster-state {{ etcd_initial_cluster_state }}",
"--initial-cluster-token {{ etcd_initial_cluster_token }}"
]
network_mode: host
restart_policy: always
when:
- groups.k8s_masters|length > 1
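
Note: once these containers are up, cluster membership can be sanity-checked from any master; this is a manual check, not part of the playbook, and it assumes etcdctl inside the image accepts these flags (the container is named etcd by the task above):

  docker exec etcd etcdctl --endpoints=http://127.0.0.1:2379 member list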

@@ -1 +0,0 @@
{% for svrs in ['k8s_masters'] %}{% for host in groups[svrs] %}etcd_{{ hostvars[host].ansible_hostname }}=http://{{ hostvars[host].vpn_ip }}:2380{% if not loop.last %},{% endif %}{% endfor %}{% endfor %}
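
Note: a sketch of what this template renders to, assuming two masters with hypothetical hostnames master1/master2 and VPN IPs 192.168.66.1/192.168.66.2:

  etcd_master1=http://192.168.66.1:2380,etcd_master2=http://192.168.66.2:2380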

@@ -1,24 +0,0 @@
---
# Kubernetes
kubernetes_apt_key: https://packages.cloud.google.com/apt/doc/apt-key.gpg
kubernetes_apt_channel: main
kubernetes_release: xenial
kubernetes_version: 1.11.3
kubernetes_version_apt: "{{kubernetes_version}}-00"
kubernetes_port: 6443
# kubeadm
kubeadm_ignore_preflight_errors: ""
kubelet_fail_swap_on: True
# Flannel
cni_version: v0.10.0
# these will determine the number of pods you can run
# cidr should be at least /16 https://kubernetes.io/docs/admin/kubeadm/
pod_subnet: 10.244.0.0/16
# floating balanced ip for k8s api
api_floating_ip: 192.168.66.253
api_floating_mask: 24
api_floating_port: 6443
router_id: 66
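
Note: rough arithmetic behind the /16 comment above, assuming the usual /24 per-node subnet that flannel and kubeadm default to:

  10.244.0.0/16 -> 2^(24-16) = 256 node subnets, each with 2^8 - 2 = 254 usable pod addresses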

@@ -1,6 +0,0 @@
---
- name: reload systemd
command: systemctl daemon-reload
- name: restart kubelet
command: systemctl restart kubelet

@@ -1,40 +0,0 @@
---
# flannel deployment
- name: Checking if flannel exists
shell: "ip link | awk '$2 ~ /^(flannel|cni)/ { print $0 }' | wc -l | awk '{ print $1 }'"
register: cni_deployment
changed_when: False
check_mode: False
- block:
- name: Determine physical interface to use with cni
shell: "ip route get 169.254.42.42 | head -n1 | sed -E 's/.+ dev ([^ ]+).+/\\1/'"
register: cni_interface
changed_when: False
check_mode: False
failed_when: "cni_interface.stdout is not match('^[a-z][a-z0-9]+$')"
- name: Create directories
file:
path: "{{ item }}"
state: directory
mode: 0755
with_items:
- /etc/kube-flannel
- /etc/cni/net.d
- /opt/cni/bin
- template:
src: kube-flannel.yml.j2
dest: /tmp/kube-flannel.yml
- name: Configure cni
shell: "kubectl apply -f /tmp/kube-flannel.yml"
register: cni_output
# flannel has trouble unless we restart the kubelet service
# we'll flush_handlers later
notify: restart kubelet
- debug: var="cni_output"
when: "cni_deployment.stdout != '2'"

@@ -1,14 +0,0 @@
---
- name: Pull docker images
docker_image: name="{{ item }}"
with_items:
- "k8s.gcr.io/kube-apiserver-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/kube-controller-manager-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/kube-proxy-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/kube-scheduler-{{ kube_arch }}:{{ kubernetes_version }}"
- "k8s.gcr.io/pause-{{ kube_arch }}:3.1"
- "quay.io/coreos/flannel:{{ cni_version }}-{{ kube_arch }}"
- name: Pull etcd if not multimaster
docker_image: name="k8s.gcr.io/etcd-{{ kube_arch }}:{{ etcd_version }}"
when: groups.k8s_masters | length == 1

@@ -1,31 +0,0 @@
---
- name: Creating /etc/keepalived on master nodes
file:
path: /etc/keepalived
state: directory
- name: Templating /etc/keepalived/keepalived.conf
template:
src: keepalived.conf.j2
dest: /etc/keepalived/keepalived.conf
- name: Running keepalived container on masters nodes
docker_container:
name: keepalived_api
image: "chmod666/keepalived:latest"
state: started
detach: True
volumes:
- /etc/keepalived/keepalived.conf:/usr/local/etc/keepalived/keepalived.conf
capabilities:
- NET_ADMIN
network_mode: host
restart_policy: always
- name: Wait for keepalived to be started
shell: 'docker ps | grep chmod666/keepalived | grep "Up"'
register: result
until: result.stdout.find("chmod666/keepalived") != -1
retries: 18
delay: 10
changed_when: no

@@ -1,29 +0,0 @@
---
# generating the kubeadm config file only on master nodes
- name: Creating kubeadm_config file
template:
src: kubeadm-config.j2
dest: /tmp/kubeadm_config
when:
- groups.k8s_masters | length > 1
- "'k8s_masters' in group_names"
# KUBELET_EXTRA_ARGS
- name: Additional configuration
template:
src: local-extras.conf.j2
dest: /etc/systemd/system/kubelet.service.d/90-local-extras.conf
mode: 0640
when:
- "kubelet_fail_swap_on == False"
notify:
- reload systemd
- restart kubelet
- meta: flush_handlers
- name: Creating .kube file in $HOME
file:
path: ~/.kube
state: directory
when: "'k8s_masters' in group_names"

@@ -1,59 +0,0 @@
---
# we get kubectl output to check if we have to add the node or not
# errors are ignored in case of the first initialization on the first master
- name: Getting kubectl output
shell: "kubectl get nodes"
register: kubectl_output
changed_when: False
check_mode: False
failed_when: kubectl_output.rc != 0 and not 'did you specify the right host' in kubectl_output.stderr
- block:
- name: Kubeadm init on first master in multimaster cluster
shell: |
kubeadm init \
--config /tmp/kubeadm_config
{%- if kubeadm_ignore_preflight_errors | length > 0 %} \
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
{% endif %}
register: kubeadm_output
failed_when: "'Your Kubernetes master has initialized successfully' not in kubeadm_output.stdout"
when: groups.k8s_masters | length > 1
- name: Kubeadm init on sole master
shell: |
kubeadm init \
--apiserver-advertise-address={{ vpn_ip }} \
--pod-network-cidr={{ pod_subnet }} \
--kubernetes-version {{ kubernetes_version }}
{%- if kubeadm_ignore_preflight_errors | length > 0 %} \
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
{% endif %}
# flannel has trouble unless we restart the kubelet service
notify: restart kubelet
when:
- groups.k8s_masters | length == 1
- inventory_hostname == initial_master
- name: Kubeadm output
debug: var=kubeadm_output
when: ansible_hostname not in kubectl_output.stdout
- name: Copying /etc/kubernetes/admin.conf to ~/.kube/config
copy:
src: /etc/kubernetes/admin.conf
dest: ~/.kube/config
remote_src: yes
- include: cni.yml
- name: Wait for master to be ready
shell: "kubectl get nodes $(hostname) | tail -n+2 | awk '{ print $2 }'"
register: result
until: result.stdout.find("Ready") == 0
retries: 36
delay: 10
changed_when: no
- meta: flush_handlers

@@ -1,40 +0,0 @@
---
- name: Distribute /etc/kubernetes/pki to other masters
synchronize:
src: /etc/kubernetes/pki/
dest: /etc/kubernetes/pki/
recursive: True
delegate_to: "{{ initial_master }}"
# forward ssh agent by preserving environment variables with sudo
become_flags: "-E"
- block:
- name: Initializing other masters
shell: |
kubeadm init \
--config /tmp/kubeadm_config
{%- if kubeadm_ignore_preflight_errors | length > 0 %} \
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }}
{% endif %}
register: kubeadm_output
failed_when: "'Your Kubernetes master has initialized successfully' not in kubeadm_output.stdout"
- name: Kubeadm output
debug: var=kubeadm_output
when: ansible_hostname not in hostvars[initial_master]['kubectl_output'].stdout
# fixing kubectl
- name: kubectl config
copy:
src: /etc/kubernetes/kubelet.conf
dest: /root/.kube/config
remote_src: True
- name: Wait for master to be ready
shell: "kubectl get nodes $(hostname) | tail -n+2 | awk '{ print $2 }'"
register: result
until: result.stdout.find("Ready") == 0
retries: 36
delay: 10
changed_when: no

@@ -1,26 +0,0 @@
---
- block:
- name: Get an existing kubeadm join token
shell: |
kubeadm token list 2>/dev/null \
| awk '$4 == "authentication,signing" { print $1 }' \
| head -n1
register: kubeadm_token_list
changed_when: False
check_mode: False
failed_when: False
- name: Generate a new kubeadm token
shell: "kubeadm token create 2>/dev/null || kubeadm token generate"
register: kubeadm_token_create
when: kubeadm_token_list.stdout | length == 0
- set_fact:
kubeadm_token: |-
{%- if kubeadm_token_list.stdout | length > 0 -%}
{{ kubeadm_token_list.stdout }}
{%- else -%}
{{ kubeadm_token_create.stdout }}
{%- endif -%}
when: kubeadm_token|default('') | length == 0
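
Note: the awk filter above keys on the USAGES column (column 4) of kubeadm token list, whose output looks roughly like this (the token value is made up and the exact columns can vary between kubeadm versions):

  TOKEN                     TTL   EXPIRES                USAGES                   DESCRIPTION   EXTRA GROUPS
  abcdef.0123456789abcdef   23h   2019-04-10T02:43:15Z   authentication,signing   <none>        system:bootstrappers:kubeadm:default-node-token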

@@ -1,32 +0,0 @@
---
- name: Checking if kube-proxy is Running
shell: "ps -ef | grep [k]ube-proxy"
register: kube_proxy_running
ignore_errors: True
changed_when: no
- block:
- name: Joining cluster on other nodes
shell: |
kubeadm join \
--token="{{ hostvars[initial_master].kubeadm_token }}" \
{{ item.ipv4 }}:{{ item.port }} \
{%- if kubeadm_ignore_preflight_errors | length > 0 %}
--ignore-preflight-errors={{ kubeadm_ignore_preflight_errors }} \
{% endif %}
--discovery-token-unsafe-skip-ca-verification
register: kubeadm_output
failed_when: "'This node has joined the cluster' not in kubeadm_output.stdout"
when: item.when | bool == True
with_items:
- ipv4: "{{ api_floating_ip }}"
port: "{{ api_floating_port }}"
when: "{{ groups.k8s_masters | length > 1 }}"
- ipv4: "{{ hostvars[initial_master].vpn_ip }}"
port: 6443
when: "{{ groups.k8s_masters | length == 1 }}"
- name: Kubeadm output
debug: var=kubeadm_output
when: "'/usr/local/bin/kube-proxy' not in kube_proxy_running.stdout"

@@ -1,67 +0,0 @@
---
- set_fact: kube_arch="{{ ansible_architecture | replace('x86_64', 'amd64') | replace('arm', 'armhf') }}"
- include: modules.yml
- include: keepalived.yml
when:
- "'k8s_masters' in group_names"
- groups.k8s_masters | length > 1
- name: Check all hosts can ping API floating IP
shell: "ping {{ api_floating_ip }} -c 1"
register: result
until: ('100% packet loss' not in result.stdout)
retries: 15
delay: 10
changed_when: no
- include: packages.yml
- include: kubeadm-token.yml
when: inventory_hostname == initial_master
- include: kubeadm-config.yml
# add masters
- block:
# docker-in-docker sometimes hangs pulling images so explicitly do it here
#- include: docker-images.yml
- include: kubeadm-master.yml
when: inventory_hostname == initial_master
# then we create the other masters
- include: kubeadm-multi.yml
when: inventory_hostname != initial_master
when: "'k8s_masters' in group_names"
- name: Wait for coredns to be running
shell: "kubectl get pods --namespace=kube-system | grep coredns | grep Running | wc -l"
register: result
until: ("2" in result.stdout)
retries: 180
delay: 10
changed_when: no
when: inventory_hostname == initial_master
- include: kubeadm-token.yml
when: inventory_hostname == initial_master
# add non masters
- include: kubeadm-worker.yml
when: "'k8s_masters' not in group_names"
# remove this wait and add a test to check all nodes are ready
- name: Wait for all nodes to be ready
shell: "kubectl get nodes {{ ansible_hostname }} | tail -n+2 | awk '{ print $2 }'"
register: result
until: ("Ready" in result.stdout)
retries: 36
delay: 10
changed_when: no
delegate_to: "{{ initial_master }}"
- debug: var=result

@@ -1,30 +0,0 @@
---
- name: Get the kernel revision
shell: "uname -r"
register: kernel
changed_when: False
check_mode: False
# allow failure as the package may not exist
- name: Try install linux-image
apt:
state: present
name: "{{ 'linux-image-' + kernel.stdout }}"
register: result
failed_when: False
- name: modprobe
modprobe:
name: "{{ item }}"
state: present
with_items:
- ip_vs
- nf_conntrack_ipv4
- name: /etc/modules
lineinfile:
path: /etc/modules
line: "{{ item }}"
with_items:
- ip_vs
- nf_conntrack_ipv4

@@ -1,18 +0,0 @@
---
- name: Adding Kubernetes official gpg key
apt_key:
url: "{{ kubernetes_apt_key }}"
state: present
- name: Adding Kubernetes repository
apt_repository:
repo: "deb http://apt.kubernetes.io/ kubernetes-{{ kubernetes_release }} {{ kubernetes_apt_channel }}"
state: present
filename: 'kubernetes'
- name: Installing kubernetes core components (kubectl, kubelet ...)
apt:
name: ['kubelet={{kubernetes_version_apt}}', 'kubeadm={{kubernetes_version_apt}}', 'kubectl={{kubernetes_version_apt}}']
register: result
retries: 3
until: result is success

@@ -1,56 +0,0 @@
global_defs {
default_interface {{vpn_interface}}
}
vrrp_instance VI_1 {
interface {{vpn_interface}}
track_interface {
{{vpn_interface}}
}
{% if inventory_hostname == initial_master %}
state MASTER
priority 100
{% else %}
state BACKUP
priority 50
{% endif %}
virtual_router_id {{ router_id }}
nopreempt
unicast_peer {
{% for host in groups['k8s_masters'] %}
{{ hostvars[host]['vpn_ip'] }}
{% endfor %}
}
virtual_ipaddress {
{{ api_floating_ip }}/{{ api_floating_mask }}
}
authentication {
auth_type PASS
auth_pass d0cker
}
notify "/container/service/keepalived/assets/notify.sh"
}
virtual_server {{ api_floating_ip }} {{ api_floating_port }} {
delay_loop 10
protocol TCP
lb_algo rr
# Use direct routing
lb_kind DR
persistence_timeout 7200
{% for host in groups['k8s_masters'] %}
real_server {{ hostvars[host]['vpn_ip'] }} {{ api_floating_port }} {
weight 1
TCP_CHECK {
connect_timeout 5
connect_port 6443
}
}
{% endfor %}
}
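
Note: a quick manual check of which master currently holds the floating API address defined above (the interface name is illustrative; use whatever vpn_interface expands to):

  ip addr show dev tun0 | grep 192.168.66.253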

@@ -1,36 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: keepalived
namespace: kube-system
spec:
hostNetwork: true
volumes:
- hostPath:
path: /etc/keepalived/keepalived.conf
type: File
name: keepalived-config
containers:
- name: keepalived
image: chmod666/keepalived:latest
# if the tag is latest, imagePullPolicy defaults to Always
# but when keepalived is the backup, the proxy may have no connection to the internet
# to avoid keepalived failing to start in that case, we set imagePullPolicy: IfNotPresent
# assuming the image has already been pulled at cluster creation. Neat.
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: "/usr/local/etc/keepalived/keepalived.conf"
name: keepalived-config
securityContext:
capabilities:
add:
- NET_ADMIN
#env:
# - name: KEEPALIVED_INTERFACE
# value: tun0
# - name: KEEPALIVED_UNICAST_PEERS
# value: "#PYTHON2BASH:['{{ groups['masters'] | map('extract', hostvars, ['vpn_ip']) | join("', '") }}']"
# - name: KEEPALIVED_VIRTUAL_IPS
# value: "#PYTHON2BASH:['{{ api_floating_ip }}/{{ api_floating_mask }}']"
# - name: KEEPALIVED_PRIORITY
# value: "{{ groups['masters'].index(inventory_hostname) + 1 }}"

@@ -1,160 +0,0 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
template:
metadata:
labels:
tier: node
app: flannel
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: {{ kube_arch }}
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:{{ cni_version }}-{{ kube_arch }}
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:{{ cni_version }}-{{ kube_arch }}
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
- --iface={{ cni_interface.stdout | trim }}
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg

@@ -1,27 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
api:
advertiseAddress: {{ api_floating_ip if groups.k8s_masters | length > 1 else hostvars[initial_master].vpn_ip }}
etcd:
external:
endpoints:
{% for host in groups['k8s_masters'] %}
- "http://{{ hostvars[host]['vpn_ip'] }}:2379"
{% endfor %}
networking:
podSubnet: "{{ pod_subnet }}"
kubernetesVersion: "v{{ kubernetes_version }}"
apiServerCertSANs:
{% for host in groups['k8s_masters'] %}
- "{{ hostvars[host]['vpn_ip'] }}"
{% endfor %}
- "{{ api_floating_ip }}"
- "127.0.0.1"
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: "{{ hostvars[initial_master].kubeadm_token }}"
ttl: 0s
usages:
- signing
- authentication
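
Note: a sketch of how the etcd endpoints and SANs in this template render, assuming two masters with hypothetical VPN IPs 192.168.66.1/192.168.66.2 and the api_floating_ip defined above:

  etcd:
    external:
      endpoints:
      - "http://192.168.66.1:2379"
      - "http://192.168.66.2:2379"
  apiServerCertSANs:
  - "192.168.66.1"
  - "192.168.66.2"
  - "192.168.66.253"
  - "127.0.0.1"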

@@ -1,2 +0,0 @@
[Service]
Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false"