Tinc setup

This commit is contained in:
Paul-Henri Froidmont 2018-09-18 04:00:12 +02:00
parent 3bcd961c81
commit e954247db5
20 changed files with 584 additions and 59 deletions

View file

@ -1,4 +1,5 @@
---
# Inventory-wide variables for the Scaleway-hosted cluster.
# Hostname of the master that bootstraps the cluster first.
initial_master: master1
# Scaleway API token; the actual secret lives in an Ansible Vault variable.
scw_token: "{{ scw_token_vault }}"
# SSH public keys installed on every provisioned Scaleway instance.
scw_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRX1scknsDkFvi1DRfNzYKPpyn9x4tiPjqkSlCQnXtmZUmK8ssYAQrM9iSIszT1tr5nQERBAHtUMjSJN8Ofi42LCJWakdYiSQSaSx3kM4TpYx8bKTEX2oxdifOovaGyn7jz8DmTipJLlrxjkQZ0HU8f6lhNPpke/jGioH6lvVtUVVDb1Ny+ygvoJsZHPuU/KSSnFED91sNrSoE8NGa29gPBrDMUZHSZVJW8+c0DWENxKpu7TKx/s64SsT3jX6gx76J/umvS7OfDu1SXg9lX6+1OUQMexjRImmzUy4VFrJAf9iAVvwYI5RlcLR9j2DbNBg0gikLAc+1OeBQcGrwYzid froidmpa@froidmpa-2017-07-31

2
group_vars/k8s_masters Normal file
View file

@ -0,0 +1,2 @@
---
# Masters get VPN addresses at offset 10 plus the numeric suffix of the
# inventory hostname (e.g. master1 -> 192.168.66.11).
vpn_ip: 192.168.66.{{ 10 + (inventory_hostname | regex_replace('\D+', '') | int) }}

2
group_vars/k8s_proxy Normal file
View file

@ -0,0 +1,2 @@
---
# Proxies get 192.168.66.<n> where <n> is the numeric suffix of the
# inventory hostname (proxy0 -> 192.168.66.0, proxy1 -> 192.168.66.1).
# NOTE(review): with vpn_netmask 255.255.255.0, proxy0 is assigned
# 192.168.66.0 — the network address of the /24 — and proxy1 gets
# 192.168.66.1, which is also tinc_route_default_ip. Confirm this offset
# is intentional (masters use 10+, workers 100+).
vpn_ip: 192.168.66.{{ 0 +(inventory_hostname|regex_replace('\D+','')|int) }}

2
group_vars/k8s_workers Normal file
View file

@ -0,0 +1,2 @@
---
# Workers get VPN addresses at offset 100 plus the numeric suffix of the
# inventory hostname (e.g. worker1 -> 192.168.66.101).
vpn_ip: 192.168.66.{{ 100 + (inventory_hostname | regex_replace('\D+', '') | int) }}

View file

@ -1,9 +0,0 @@
[ansible-controller]
localhost ansible_connection=local
[k8s_kubectl]
localhost ansible_connection=local
[k8s_ca]
localhost ansible_connection=local

104
k8s.yml
View file

@ -1,52 +1,56 @@
---
- hosts: localhost
become: yes
gather_facts: no
- hosts: k8s_proxy:k8s_masters:k8s_workers
roles:
- role: harden-linux
tags: role-harden-linux
- role: githubixx.peervpn
tags: role-peervpn
- hosts: k8s
vars:
ansible_user: ubuntu
gather_facts: no
roles:
- role: harden-linux
tags: role-harden-linux
- hosts: all
become: yes
roles:
- role: peervpn
tags: role-peervpn
- hosts: k8s_ca
become: yes
gather_facts: no
roles:
- role: cfssl
tags: role-cfssl
- role: kubernetes-ca
tags: role-kubernetes-ca
- hosts: k8s_etcd
become: yes
gather_facts: no
roles:
- role: etcd
tags: role-etcd
- hosts: k8s_master
gather_facts: no
roles:
- role: kubernetes-controller
tags: role-kubernetes-controller
- hosts: k8s_worker
gather_facts: no
roles:
- role: githubixx.kubernetes-worker
tags: role-kubernetes-worker
- hosts: k8s
gather_facts: no
roles:
- role: githubixx.flanneld
tags: role-kubernetes-flanneld
- role: githubixx.docker
tags: role-docker
- role: proxy
tags: proxy
#- hosts: localhost
# become: yes
# gather_facts: no
# roles:
# - role: harden-linux
# tags: role-harden-linux
# - role: githubixx.peervpn
# tags: role-peervpn
#- hosts: k8s
# vars:
# ansible_user: ubuntu
# gather_facts: no
# roles:
# - role: harden-linux
# tags: role-harden-linux
#- hosts: all
# become: yes
# roles:
# - role: peervpn
# tags: role-peervpn
#- hosts: k8s_ca
# become: yes
# gather_facts: no
# roles:
# - role: cfssl
# tags: role-cfssl
# - role: kubernetes-ca
# tags: role-kubernetes-ca
#- hosts: k8s_etcd
# become: yes
# gather_facts: no
# roles:
# - role: etcd
# tags: role-etcd
#- hosts: k8s_master
# gather_facts: no
# roles:
# - role: kubernetes-controller
# tags: role-kubernetes-controller
#- hosts: k8s_worker
# gather_facts: no
# roles:
# - role: githubixx.kubernetes-worker
# tags: role-kubernetes-worker
#- hosts: k8s
# gather_facts: no
# roles:
# - role: githubixx.flanneld
# tags: role-kubernetes-flanneld
# - role: githubixx.docker
# tags: role-docker

View file

@ -0,0 +1,24 @@
---
# Prefix for the point-to-point addresses assigned to tun* devices; the
# trailing period must be kept. With the default "10.50." and device tunN,
# tunnel.j2 gives the router side 10.50.N.2 and the target side 10.50.N.1.
proxy_ssh_ip_prefix: '10.50.'
# Private interface reachable from the other hosts; autodetected when empty.
proxy_private_interface: ''
# Lowest tun device index to allocate (1 -> tun1, 10 -> tun10, ...).
proxy_ssh_tun_index_min: 1
# Unprivileged user created on every host for the SSH tunnels.
proxy_ssh_user: sshproxy
# Timeout (seconds) for each URL probe when testing internet access.
proxy_test_timeout: 5
# URLs probed to decide whether a host has internet access.
proxy_test_urls:
- https://archlinux.org/
- https://letsencrypt.org/
- http://mirrors.ubuntu.com/

View file

@ -0,0 +1,5 @@
---
# Handler triggered via "notify: reload sshd" after sshd_config is edited
# (see ssh-up.yml); the name is referenced elsewhere and must not change.
- name: reload sshd
  systemd:
    name: sshd
    state: reloaded

View file

@ -0,0 +1,2 @@
---
# Placeholder task list: keepalived provisioning is not implemented yet.
# Rewritten from inline key=value args to native block YAML (Ansible idiom).
- name: warn that keepalived setup is not implemented
  debug:
    msg: "Not Implemented"

View file

@ -0,0 +1,74 @@
---
# Determine which NIC currently routes to the internet (probe via 1.1.1.1).
- name: Get the internet interface
  shell: ip route get 1.1.1.1 | head -n1 | sed -E 's/^.+dev ([^ ]+).+$/\1/'
  register: interface_result
  changed_when: False
  check_mode: False
# rc == 0 iff at least one of proxy_test_urls answered within the timeout.
- name: Check for internet access
  shell: |-
    false \{% for url in proxy_test_urls %}
    || curl -IsSL -m{{ proxy_test_timeout }} {{ url }} \
    {% endfor %}
    || false
  args:
    warn: False
  register: curl_result_initial
  ignore_errors: True
  changed_when: False
  check_mode: False
- name: Set host interface facts
  set_fact:
    proxy_interface: "{{ interface_result.stdout | trim }}"
    proxy_inet: "{{ curl_result_initial.rc == 0 }}"
- name: Assert at least one node has internet connectivity
  assert:
    that: hostvars.values() | selectattr('inventory_hostname', 'in', groups['k8s']) | selectattr('proxy_inet', '==', True) | list | length != 0
  run_once: true
# The first host with internet access acts as the NAT router for the rest.
- name: Set router hostname fact
  set_fact:
    proxy_router_hostname: "{{ hostvars.values() | selectattr('inventory_hostname', 'in', groups['k8s']) | selectattr('proxy_inet', '==', True) | map(attribute='inventory_hostname') | first }}"
- name: Allow ip forwarding (kernel)
  sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_set: True
    reload: True
- name: Allow ip forwarding (iptables)
  iptables:
    table: nat
    chain: POSTROUTING
    out_interface: "{{ proxy_interface }}"
    jump: MASQUERADE
  register: iptables_result
# Temporary SSH tunnels give offline nodes internet via the router while
# tinc/keepalived are installed; they are torn down again afterwards.
- name: Set up SSH tunnels
  include: ssh-up.yml
  when: hostvars.values() | selectattr('inventory_hostname', 'in', groups['k8s']) | selectattr('proxy_inet', '==', False) | list | length != 0
- name: Set up tinc
  include_role:
    name: tinc
- name: Set up keepalived
  include: keepalived.yml
- name: Tear down SSH tunnels
  include: ssh-down.yml
# Re-probe; a different rc than the initial probe is reported as "changed".
- name: Check for internet access
  shell: |-
    false \{% for url in proxy_test_urls %}
    || curl -IsSL -m{{ proxy_test_timeout }} {{ url }} \
    {% endfor %}
    || false
  args:
    warn: False
  register: curl_result
  changed_when: curl_result_initial.rc != curl_result.rc
  check_mode: False

View file

@ -0,0 +1,52 @@
---
# Tear down the sshproxy point-to-point tunnels created by ssh-up.yml.
- name: Kill existing tunnel connections
  shell: |
    CONNECTION_PIDS=$(ps aux | awk '$1 == "{{ proxy_ssh_user }}" && $0 ~ /ssh -N -f/ { print $2 }')
    echo $CONNECTION_PIDS | xargs -r kill
    echo "$CONNECTION_PIDS" | grep -vE '^$' | wc -l
  register: ssh_result
  changed_when: ssh_result.stdout_lines | last | int > 0
# Delete every tun device whose interfaces.d file carries the "## sshproxy"
# marker; stdout lists the removed interface names for the tasks below.
- name: Remove tunnel interfaces
  shell:
    cmd: |
      bash -s <<'EOF'
      TUN_INTERFACE_FILES=$(grep -El '^## sshproxy' /etc/network/interfaces.d/tun*)
      IFS=$'\n\t'
      for file in $TUN_INTERFACE_FILES; do
      interface=$(basename $file)
      echo $interface
      rm $file
      ip link delete $interface
      done
      EOF
  register: tun_result
  changed_when: tun_result.stdout_lines | length > 0
- name: Remove tunnel iptables (1/2)
  iptables:
    state: absent
    chain: FORWARD
    in_interface: "{{ proxy_interface }}"
    out_interface: "{{ item }}"
    ctstate:
      - RELATED
      - ESTABLISHED
    jump: ACCEPT
  with_items: "{{ tun_result.stdout_lines }}"
  when: inventory_hostname == proxy_router_hostname
- name: Remove tunnel iptables (2/2)
  iptables:
    state: absent
    chain: FORWARD
    in_interface: "{{ item }}"
    out_interface: "{{ proxy_interface }}"
    jump: ACCEPT
  with_items: "{{ tun_result.stdout_lines }}"
  when: inventory_hostname == proxy_router_hostname
- name: Remove authorized keys file
  file:
    path: "/home/{{ proxy_ssh_user }}/.ssh/authorized_keys"
    state: absent

View file

@ -0,0 +1,131 @@
---
# All nodes may use the ssh proxy user to connect to any other node
# to use point-to-point tunnelling.
# remove existing interfaces in case of an error so we don't run out of ip addresses
- name: Remove existing tunnel interfaces
  include: ssh-down.yml
- name: Add the proxy user
  user:
    name: "{{ proxy_ssh_user }}"
    shell: /bin/false
# Restrict the proxy user to tunnelling only: no TTY commands, p2p tun only.
- name: Allow point-to-point tunnelling for the ssh proxy user
  blockinfile:
    block: |
      # sshproxy start
      Match User {{ proxy_ssh_user }}
      AllowTcpForwarding yes
      PermitTunnel point-to-point
      ForceCommand /bin/false
      # sshproxy end
    dest: "/etc/ssh/sshd_config"
    validate: "/usr/sbin/sshd -T -f %s"
  notify: reload sshd
# Apply the sshd change now, before the tunnels below are opened.
- meta: flush_handlers
- name: Create ssh private key
  shell: yes '' | ssh-keygen -N ''
  args:
    chdir: "/home/{{ proxy_ssh_user }}"
    creates: "/home/{{ proxy_ssh_user }}/.ssh/id_rsa.pub"
  become: True
  become_method: su
  become_flags: '-s /bin/sh'
  become_user: "{{ proxy_ssh_user }}"
- name: Read public key
  slurp:
    src: "/home/{{ proxy_ssh_user }}/.ssh/id_rsa.pub"
  register: public_key_result
- name: Set public key fact
  set_fact:
    proxy_ssh_public_key: "{{ public_key_result.content | b64decode | trim }}"
# Router-only section: authorize every node's key, allocate one tunN per
# offline node (tunnel.j2) and add the forwarding rules.
# NOTE(review): indentation was lost in extraction; the trailing `when`
# is assumed to apply to this whole block — confirm against the original.
- block:
    - name: Set authorized keys
      authorized_key:
        user: "{{ proxy_ssh_user }}"
        key: "{{ item }}"
      with_items: "{{ hostvars.values() | selectattr('inventory_hostname', 'in', groups['k8s']) | selectattr('proxy_ssh_public_key', 'defined') | map(attribute='proxy_ssh_public_key') | list }}"
    - name: Create tunnel interfaces on the router host
      shell:
        cmd: |
          bash -s <<'EOF'
          {{ lookup('template', './tunnel.j2') }}
          EOF
      register: tun_result
      with_items: "{{ hostvars.values() | selectattr('inventory_hostname', 'in', groups['k8s']) | selectattr('proxy_inet', '==', False) | map(attribute='inventory_hostname') | list }}"
    # tunnel.j2 prints one JSON allocation record per created interface.
    - name: Set created interfaces fact
      set_fact:
        proxy_ssh_tunnel_map: "{{ tun_result.results | map(attribute='stdout') | map('from_json') | list }}"
    - name: Add tunnel iptables (1/2)
      iptables:
        chain: FORWARD
        in_interface: "{{ proxy_interface }}"
        out_interface: "{{ item.interface }}"
        ctstate:
          - RELATED
          - ESTABLISHED
        jump: ACCEPT
      with_items: "{{ proxy_ssh_tunnel_map }}"
    - name: Add tunnel iptables (2/2)
      iptables:
        chain: FORWARD
        in_interface: "{{ item.interface }}"
        out_interface: "{{ proxy_interface }}"
        jump: ACCEPT
      with_items: "{{ proxy_ssh_tunnel_map }}"
  when: inventory_hostname == proxy_router_hostname
# Target-only section: create the local tun endpoint, keep private ranges on
# the existing gateway, then default-route through the tunnel.
- block:
    - set_fact: proxy_target="{{ ( hostvars[proxy_router_hostname].proxy_ssh_tunnel_map | selectattr('target_hostname', '==', inventory_hostname) | list )[0] }}"
    # Autodetect the private interface unless one was configured.
    - block:
        - shell: "ip route get 169.254.42.42 | head -n1 | sed -E 's/.+ dev ([^ ]+).+/\\1/'"
          register: result
        - set_fact: proxy_private_interface="{{ result.stdout }}"
      when: proxy_private_interface | length == 0
    # TODO edit /etc/network/interfaces (?) to apply rules on network up
    # NOTE(review): the script sets the default route via the target's own
    # tunnel address (target_ip); routing via the router side (router_ip)
    # may have been intended — confirm.
    - name: Create tunnel interface on the target hosts
      shell:
        cmd: |
          bash -s <<'EOF'
          echo "## sshproxy" > /etc/network/interfaces.d/{{ proxy_target.interface }}
          ip tuntap add mode tun dev {{ proxy_target.interface }}
          ip addr add {{ proxy_target.target_ip }}/30 peer {{ proxy_target.router_ip }} dev {{ proxy_target.interface }}
          ip link set dev {{ proxy_target.interface }} up
          DEFAULT_GATEWAY=$(ip route | awk '$3 == "{{ proxy_private_interface }}" { print $1 }' | cut -d'/' -f1)
          ip route add 10.0.0.0/8 via $DEFAULT_GATEWAY dev {{ proxy_private_interface }}
          ip route add 169.254.0.0/16 via $DEFAULT_GATEWAY dev {{ proxy_private_interface }}
          ip route add 172.16.0.0/12 via $DEFAULT_GATEWAY dev {{ proxy_private_interface }}
          ip route add 192.168.0.0/16 via $DEFAULT_GATEWAY dev {{ proxy_private_interface }}
          ip route replace default via {{ proxy_target.target_ip }}
          EOF
    # -w N:N binds local tunN to the router's tunN claimed by tunnel.j2.
    - name: Establish the SSH tunnel
      shell: |
        ssh -N -f \
        -w {{ proxy_target.interface_id }}:{{ proxy_target.interface_id }} \
        -o StrictHostKeyChecking=no \
        -o UserKnownHostsFile=/dev/null \
        {{ hostvars[proxy_router_hostname]['ansible_' + proxy_private_interface].ipv4.address }} &
        echo $!
      become: True
      become_method: su
      become_flags: '-s /bin/sh'
      become_user: "{{ proxy_ssh_user }}"
  when: inventory_hostname in hostvars[proxy_router_hostname].proxy_ssh_tunnel_map | map(attribute='target_hostname') | list

View file

@ -0,0 +1,40 @@
#!/bin/bash
# Runs on the router once per target host ({{ item }}): claim the first free
# tunN device, record the allocation in /etc/network/interfaces.d, create the
# /30 point-to-point device and print the allocation record as JSON.
for i in `seq {{ proxy_ssh_tun_index_min }} 255`; do
    interface=tun${i}
    file=/etc/network/interfaces.d/$interface
    # Symlinking to /dev/null atomically claims the name; ln fails (non-zero)
    # if the interface file already exists.
    ln -s /dev/null "$file" 2>/dev/null
    if [[ $? -eq 0 ]]; then
        INTERFACE_ID=$i
        # Router side gets .2, target side .1 of the /30.
        IP_SELF={{ proxy_ssh_ip_prefix }}${INTERFACE_ID}.2
        IP_PEER={{ proxy_ssh_ip_prefix }}${INTERFACE_ID}.1
        tmp=$(mktemp)
        # Store the allocation as commented-out JSON in the interfaces file;
        # the sed strips the 8-space heredoc indentation.
        cat <<EOF2 | sed -E 's/^ {8}//' | tee "$tmp" >/dev/null
        ## sshproxy
        #{ "router_hostname": "{{ ansible_hostname }}",
        # "router_ip": "$IP_SELF",
        # "target_hostname": "{{ item }}",
        # "target_ip": "$IP_PEER",
        # "interface_id": $INTERFACE_ID,
        # "interface": "$interface"
        #}
EOF2
        mv -f "$tmp" "$file"
        break
    fi
done
if [[ -z "$INTERFACE_ID" ]]; then
    echo "ERR could not create tunnel interface" >&2
    exit 1
fi
set -e
ip tuntap add mode tun dev $interface
ip addr add $IP_SELF/30 peer $IP_PEER dev $interface
ip link set dev $interface up
# Emit the JSON record: comment lines minus the leading '#', excluding the
# '## sshproxy' marker line.
grep -E "^#" "$file" | grep -vE "^##" | sed -E "s/^#//"

View file

@ -0,0 +1,10 @@
---
# Floating IP fronting the k8s API (managed by keepalived on the masters).
api_floating_ip: 192.168.66.253
# tinc network name; maps to the /etc/tinc/<netname>/ directory.
netname: meshvpn
# Scaleway internal DNS domain used to build per-node Address entries.
scw_private_domain: priv.cloud.scaleway.com
# Node whose tinc-up script must NOT redirect the default route.
tinc_primary_router: proxy0
# Default gateway used by all non-primary nodes once the VPN is up.
tinc_route_default_ip: 192.168.66.1
# Probe address used by tinc-up to discover the private interface.
tinc_route_get_ip: 169.254.42.42
# Network device created by tincd (Interface directive in tinc.conf).
vpn_interface: tun0
# Netmask applied to vpn_interface by tinc-up.
vpn_netmask: 255.255.255.0
# CIDR prefix length for the per-host Subnet entries in tinc host files.
vpn_subnet_cidr_netmask: 32

View file

@ -0,0 +1,14 @@
---
# Handlers referenced by "notify: restart tinc" / "notify: reload tinc" in
# roles/tinc/tasks/main.yml; their names must stay unchanged.
# The original looped with a single-item with_items over "tinc" — the loop
# was redundant, so the service name is passed directly.
- name: restart tinc
  systemd:
    name: tinc
    state: restarted
- name: reload tinc
  systemd:
    name: tinc
    state: reloaded

142
roles/tinc/tasks/main.yml Normal file
View file

@ -0,0 +1,142 @@
---
- name: install tinc
  apt:
    name: tinc
    # NOTE(review): "latest" upgrades tinc on every run; "present" would be
    # more reproducible — confirm upgrades are wanted here.
    state: latest
- name: ensure tinc netname directory exists
  file:
    path: /etc/tinc/{{ netname }}/hosts
    recurse: True
    state: directory
# nets.boot lists the networks tincd brings up at service start.
- name: create /etc/tinc/nets.boot file from template
  template:
    src: nets.boot.j2
    dest: /etc/tinc/nets.boot
  notify:
    - restart tinc
- name: ensure tinc.conf contains connection to all other nodes
  template:
    src: tinc.conf.j2
    dest: /etc/tinc/{{ netname }}/tinc.conf
  notify:
    - restart tinc
    - reload tinc
- name: create tinc-up file
  template:
    src: tinc-up.j2
    dest: /etc/tinc/{{ netname }}/tinc-up
    mode: 0755
  notify:
    - restart tinc
- name: create tinc-down file
  template:
    src: tinc-down.j2
    dest: /etc/tinc/{{ netname }}/tinc-down
    mode: 0755
  notify:
    - restart tinc
# Publish this node's reachable address in its own tinc hosts file: either
# the Scaleway internal DNS name (default) or the physical IP (fallback).
- name: ensure tinc hosts file binds to scaleway dns address
  block:
    - shell: "/usr/local/bin/scw-metadata ID"
      register: scw_id
    - lineinfile:
        dest: /etc/tinc/{{ netname }}/hosts/{{ inventory_hostname }}
        line: "Address = {{ scw_id.stdout }}.{{ scw_private_domain }}"
        create: yes
      notify:
        - restart tinc
  when: tinc_ignore_scaleway_dns | default(False) | bool == False
- name: ensure tinc hosts file binds to physical ip address
  lineinfile:
    dest: /etc/tinc/{{ netname }}/hosts/{{ inventory_hostname }}
    # Renders to a single "Address = <ip>" line; proxies use the configured
    # private interface (default eth0), other nodes use eth0 directly.
    line: |-
      {%- if "k8s_proxy" in group_names -%}
      {%- set interface = 'ansible_' + tinc_private_interface | default('eth0') -%}
      Address = {{ vars[interface].ipv4.address }}
      {%- else -%}
      Address = {{ ansible_eth0.ipv4.address }}
      {%- endif -%}
    create: yes
  notify:
    - restart tinc
  when: tinc_ignore_scaleway_dns | default(False) | bool == True
- name: ensure subnet ip address is properly set in tinc host file
  lineinfile:
    dest: /etc/tinc/{{ netname }}/hosts/{{ inventory_hostname }}
    line: "Subnet = {{ vpn_ip }}/{{ vpn_subnet_cidr_netmask }}"
    create: yes
  notify:
    - restart tinc
# in case of multimaster we need to add a subnet line
# NOTE(review): the when only checks the group's size, so this Subnet line is
# added on every host, not just masters — confirm whether membership should
# also be checked ("'k8s_masters' in group_names").
- name: ensure that keepalived ip is properly set in tinc host file on k8s_masters
  lineinfile:
    dest: /etc/tinc/{{ netname }}/hosts/{{ inventory_hostname }}
    line: "Subnet = {{ api_floating_ip }}/{{ vpn_subnet_cidr_netmask }}"
    create: yes
  when: groups.k8s_masters | length > 1
- name: check whether /etc/tinc/netname/hosts/inventory_hostname contains "-----END RSA PUBLIC KEY-----"
  command: awk '/^-----END RSA PUBLIC KEY-----$/' /etc/tinc/{{ netname }}/hosts/{{ inventory_hostname }}
  changed_when: "public_key.stdout != '-----END RSA PUBLIC KEY-----'"
  register: public_key
# this is necessary because the public key will not be generated (non-interactively) if the private key already exists
- name: delete private key and regenerate keypair if public key is absent from tinc hosts file
  file:
    path: /etc/tinc/{{ netname }}/rsa_key.priv
    state: absent
  when: public_key.changed
- name: create tinc private key (and append public key to tincd hosts file)
  shell: tincd -n {{ netname }} -K4096
  args:
    creates: /etc/tinc/{{ netname }}/rsa_key.priv
  notify:
    - restart tinc
# Distribute every node's hosts file to all other nodes via the controller.
- name: fetch tinc hosts file after key creation
  fetch:
    src: /etc/tinc/{{ netname }}/hosts/{{ inventory_hostname }}
    dest: fetch/{{ inventory_hostname }}
    flat: yes
  notify:
    - reload tinc
- name: sync the fetched tinc hosts files on each host
  synchronize:
    src: fetch/
    dest: /etc/tinc/{{ netname }}/hosts/
    use_ssh_args: yes
  notify:
    - reload tinc
- meta: flush_handlers
- name: start tinc on boot
  systemd:
    name: tinc
    enabled: yes
    state: started
# Poll until the VPN device appears (200 retries x 10s).
- name: ensure tun0 exists
  shell: "ip a s"
  register: result
  until: result.stdout.find("tun0") != -1
  retries: 200
  delay: 10
  changed_when: False
- name: add nodes to /etc/hosts (ansible_inventory resolves to vpn_ip)
  lineinfile: dest=/etc/hosts regexp='.*{{ item }}$' line="{{ hostvars[item].vpn_ip }} {{item}}" state=present
  when: hostvars[item].vpn_ip is defined
  with_items: "{{ play_hosts }}"

View file

@ -0,0 +1 @@
# Networks tincd starts at boot / service start, one netname per line
# (tinc skips empty lines and lines starting with '#').
{{ netname }}

View file

@ -0,0 +1,2 @@
#!/bin/sh
# Executed by tincd when the VPN is stopped: take the tun device offline.
ifconfig {{ vpn_interface }} down

View file

@ -0,0 +1,16 @@
#!/bin/sh
# Executed by tincd once the VPN device exists: assign this node's VPN
# address, then — on every node except the primary router — pin the private
# and link-local ranges to the existing gateway before sending the default
# route through the VPN.
ifconfig {{ vpn_interface }} {{ vpn_ip }} netmask {{ vpn_netmask }}
{% if inventory_hostname != tinc_primary_router %}
ROUTE_GET_IP={{ tinc_route_get_ip }}
# Interface currently used to reach the probe address.
INTERFACE=$(ip route get $ROUTE_GET_IP | head -n1 | sed -E 's/.+ dev ([^ ]+).+/\1/')
# NOTE(review): this extracts the destination prefix of the first route on
# $INTERFACE (a network address, not a gateway IP) — confirm that
# "via $GATEWAY" is valid on this provider's network.
GATEWAY=$(ip route | awk '$3 == "'$INTERFACE'" { print $1 }' | cut -d'/' -f1)
ip route add 10.0.0.0/8 via $GATEWAY dev $INTERFACE
ip route add 169.254.0.0/16 via $GATEWAY dev $INTERFACE
ip route add 172.16.0.0/12 via $GATEWAY dev $INTERFACE
ip route add 192.168.0.0/16 via $GATEWAY dev $INTERFACE
ip route replace default via {{ tinc_route_default_ip }}
{% endif %}

View file

@ -0,0 +1,10 @@
Name = {{ inventory_hostname }}
AddressFamily = ipv4
Interface = {{ vpn_interface }}
Mode = switch
{% for peer in play_hosts if peer != inventory_hostname %}
ConnectTo = {{ peer }}
{% endfor %}