Add matrix, plex and deluge

Paul-Henri Froidmont 2017-12-31 16:04:38 +01:00
parent 643ab3229a
commit b09ac86a62
24 changed files with 390 additions and 1 deletion

View file

@@ -15,6 +15,9 @@
    - role: gitlab-docker
    - role: mailu-docker
    - role: nextcloud-docker
    - role: matrix-docker
    - role: plex-docker
    - role: deluge-docker
  vars_prompt:
    - name: "ansible_sudo_pass"
      prompt: "Sudo password"

View file

@@ -0,0 +1 @@
---

View file

@@ -0,0 +1,39 @@
version: '2.2'

networks:
  proxy-tier:
    external:
      name: nginx-proxy

services:
  deluge:
    image: binhex/arch-delugevpn
    cap_add:
      - NET_ADMIN
    ports:
      - 8118:8118
      - 58846:58846
    volumes:
      - /var/lib/deluge:/data
      - ./config:/config
      - /media:/media
      - /etc/localtime:/etc/localtime:ro
    environment:
      - VPN_ENABLED=yes
      - VPN_USER=${VPN_USER}
      - VPN_PASS=${VPN_PASS}
      - VPN_REMOTE=${VPN_REMOTE}
      - VPN_PORT=${VPN_PORT}
      - VPN_PROTOCOL=${VPN_PROTOCOL}
      - VPN_PROV=${VPN_PROV}
      - ENABLE_PRIVOXY=yes
      - LAN_NETWORK=10.90.86.0/26
      - DEBUG=true
      - PUID=33
      - PGID=33
      - VIRTUAL_HOST=${DELUGE_DOMAIN}
      - VIRTUAL_NETWORK=nginx-proxy
      - VIRTUAL_PORT=8112
    restart: always
    networks:
      - proxy-tier

View file

@@ -0,0 +1,2 @@
dependencies:
  - nginx-proxy-docker
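The new compose files all declare nginx-proxy as an external network, so that network has to exist before docker_service brings a project up. The nginx-proxy-docker dependency presumably creates it; if it does not, a minimal sketch using the same docker_network module seen in the matrix role would be:

# Sketch only: ensure the external network referenced by the compose files exists.
# Assumed to belong in the nginx-proxy-docker role, not in this commit.
- name: Create nginx-proxy docker network
  docker_network:
    name: nginx-proxy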

View file

@@ -0,0 +1,13 @@
---
- name: Copy deluge docker files
  copy:
    src: deluge
    dest: "{{docker_compose_files_folder}}"
- name: Create deluge .env
  template:
    src: deluge/.env
    dest: "{{docker_compose_files_folder}}/deluge/.env"
- name: Start deluge docker project
  docker_service:
    project_src: "{{docker_compose_files_folder}}/deluge"
    state: present
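The deluge .env template below references deluge_vpn_user and deluge_vpn_password, which must be defined as Ansible variables elsewhere. A sketch of how they could be supplied, assuming vault-encrypted group_vars (the file path and values are purely illustrative):

# group_vars/all/vault.yml (hypothetical location; encrypt with ansible-vault)
deluge_vpn_user: changeme          # placeholder value
deluge_vpn_password: changeme-too  # placeholder value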

View file

@@ -0,0 +1,12 @@
COMPOSE_PROJECT_NAME=deluge
#Domains
DELUGE_DOMAIN=deluge.banditlair.com
#Deluge VPN
VPN_USER={{deluge_vpn_user}}
VPN_PASS={{deluge_vpn_password}}
VPN_REMOTE=lon.uk.torguardvpnaccess.com
VPN_PORT=1912
VPN_PROTOCOL=udp
VPN_PROV=torguard_london

View file

@@ -0,0 +1 @@
---

View file

@@ -0,0 +1,43 @@
version: "2"

services:
  db:
    image: postgres:9.6
    restart: always
    # Binding to 127.0.0.1 keeps the port reachable only from the host itself
    ports:
      - "127.0.0.1:5432:5432"
    volumes:
      - /var/lib/matrix/db:/var/lib/postgresql/data
      - /backups/matrix:/backups
    environment:
      - POSTGRES_PASSWORD=synapse
      - POSTGRES_USER=synapse
    networks:
      - matrix

  synapse:
    image: silviof/docker-matrix
    ports:
      - "127.0.0.1:8008:8008"
      - "8448:8448"
      - "3478:3478"
    environment:
      - VIRTUAL_HOST=matrix.banditlair.com
      - VIRTUAL_NETWORK=nginx-proxy
      - VIRTUAL_PORT=8008
    volumes:
      - /var/lib/matrix/media_store:/data/media_store
      - /var/log/synapse:/data/log
      - ./synapse:/data
    networks:
      - matrix
      - proxy-tier
    restart: always

networks:
  matrix:
    external:
      name: matrix-network
  proxy-tier:
    external:
      name: nginx-proxy

View file

@@ -0,0 +1,2 @@
dependencies:
  - nginx-proxy-docker

View file

@@ -0,0 +1,24 @@
---
- name: Copy matrix docker files
  copy:
    src: matrix
    dest: "{{docker_compose_files_folder}}"
- name: Create matrix-network docker network
  docker_network:
    name: matrix-network
- name: Start matrix docker project
  docker_service:
    project_src: "{{docker_compose_files_folder}}/matrix"
    state: present
- name: Check if database tables exist
  command: docker-compose exec -T db psql -U synapse synapse -c "\dt"
  args:
    chdir: "{{docker_compose_files_folder}}/matrix/"
  register: db_tables_exist
  ignore_errors: false
  changed_when: '"No relations found." in db_tables_exist.stdout_lines'
- name: Restore Matrix database
  command: docker-compose exec -T db sh -c "psql -U synapse synapse < /backups/database.dmp"
  args:
    chdir: "{{docker_compose_files_folder}}/matrix/"
  when: '"No relations found." in db_tables_exist.stdout_lines'

View file

@@ -0,0 +1,7 @@
COMPOSE_PROJECT_NAME=monitoring
#Domains
GRAFANA_DOMAIN=grafana.banditlair.com
#Letsencrypt
LETSENCRYPT_EMAIL=banditlair@outlook.com

View file

@@ -0,0 +1,10 @@
route:
  receiver: 'slack'

receivers:
  - name: 'slack'
    slack_configs:
      - send_resolved: true
        username: '<username>'
        channel: '#<channel-name>'
        api_url: '<incoming-webhook-url>'

View file

@@ -0,0 +1,93 @@
version: '2.2'

volumes:
  prometheus_data: {}
  grafana_data: {}

networks:
  proxy-tier:
    external:
      name: nginx-proxy
  back-tier:
    driver: bridge

services:
  prometheus:
    image: prom/prometheus
    volumes:
      - ./prometheus:/etc/prometheus/
      - /var/lib/prometheus:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      # - '--alertmanager.url=http://alertmanager:9093'
    expose:
      - 9090
    links:
      - cadvisor:cadvisor
      - alertmanager:alertmanager
    depends_on:
      - cadvisor
    networks:
      - back-tier
    restart: always

  node-exporter:
    image: prom/node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command: '--path.procfs=/host/proc --path.sysfs=/host/sys --collector.filesystem.ignored-mount-points="^(/rootfs|/host|)/(sys|proc|dev|host|etc)($$|/)" --collector.filesystem.ignored-fs-types="^(sys|proc|auto|cgroup|devpts|ns|au|fuse\.lxc|mqueue)(fs|)$$"'
    expose:
      - 9100
    networks:
      - back-tier
    restart: always

  alertmanager:
    image: prom/alertmanager
    ports:
      - 9093:9093
    volumes:
      - ./alertmanager/:/etc/alertmanager/
    networks:
      - back-tier
    command:
      - '-config.file=/etc/alertmanager/config.yml'
      - '-storage.path=/alertmanager'
    restart: always

  cadvisor:
    image: google/cadvisor
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    expose:
      - 8080
    networks:
      - back-tier
    restart: always

  grafana:
    image: grafana/grafana
    depends_on:
      - prometheus
    expose:
      - 3000
    volumes:
      - /var/lib/grafana:/var/lib/grafana
    environment:
      - VIRTUAL_HOST=${GRAFANA_DOMAIN}
      - VIRTUAL_NETWORK=nginx-proxy
      - VIRTUAL_PORT=3000
      # - LETSENCRYPT_HOST=${GRAFANA_DOMAIN}
      # - LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL}
    env_file:
      - ./grafana/grafana.env
    networks:
      - back-tier
      - proxy-tier
    restart: always

View file

@@ -0,0 +1,2 @@
GF_SECURITY_ADMIN_PASSWORD=pSvpRYWAS9LJrcEiuiuAV33f
GF_USERS_ALLOW_SIGN_UP=false

View file

@@ -0,0 +1,9 @@
ALERT service_down
  IF up == 0

ALERT high_load
  IF node_load1 > 0.5
  ANNOTATIONS {
    summary = "Instance {{ $labels.instance }} under high load",
    description = "{{ $labels.instance }} of job {{ $labels.job }} is under high load.",
  }
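These rules use the Prometheus 1.x syntax. If prom/prometheus resolves to a 2.x image (the compose file already passes the 2.x-style --config.file flag), rule files must be YAML instead; an equivalent sketch keeping the same two alerts:

groups:
  - name: basic-alerts   # group name is an assumption
    rules:
      - alert: service_down
        expr: up == 0
      - alert: high_load
        expr: node_load1 > 0.5
        annotations:
          summary: "Instance {{ $labels.instance }} under high load"
          description: "{{ $labels.instance }} of job {{ $labels.job }} is under high load."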

View file

@@ -0,0 +1,31 @@
# my global config
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # Evaluate rules every 15 seconds.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'banditlair'

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  - "alert.rules"
  # - "first.rules"
  # - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090','cadvisor:8080','node-exporter:9100']
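The --alertmanager.url flag commented out in the compose file only exists on Prometheus 1.x; on 2.x the Alertmanager target is configured in this file instead, roughly:

alerting:
  alertmanagers:
    - static_configs:
        - targets: ['alertmanager:9093']   # service name and port from the compose file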

View file

@@ -29,7 +29,7 @@
   register: db_tables_exist
   ignore_errors: true
   changed_when: db_tables_exist.stdout_lines|length == 0
-- name: Restore database
+- name: Restore Nextcloud database
   command: docker-compose exec -T db sh -c "mysql -u nextcloud -p{{nextcloud_mysql_password}} nextcloud < /backups/database.dmp"
   args:
     chdir: "{{docker_compose_files_folder}}/nextcloud/"

View file

@@ -0,0 +1 @@
---

View file

@@ -0,0 +1,24 @@
version: '2.2'

networks:
  proxy-tier:
    external:
      name: nginx-proxy

services:
  plex:
    image: plexinc/pms-docker:plexpass
    hostname: ${PLEX_DOMAIN}
    volumes:
      - /var/lib/plex/database:/config
      - /media:/data
      - /var/lib/plex/transcode:/transcode
      - /tmp:/tmp
    environment:
      - VIRTUAL_HOST=${PLEX_DOMAIN}
      - VIRTUAL_NETWORK=nginx-proxy
      - VIRTUAL_PORT=32400
      - ADVERTISE_IP=https://plex.banditlair.com/
    networks:
      - proxy-tier
    restart: always

View file

@@ -0,0 +1,2 @@
dependencies:
  - nginx-proxy-docker

View file

@@ -0,0 +1,13 @@
---
- name: Copy plex docker files
  copy:
    src: plex
    dest: "{{docker_compose_files_folder}}"
- name: Create plex .env
  template:
    src: plex/.env
    dest: "{{docker_compose_files_folder}}/plex/.env"
- name: Start plex docker project
  docker_service:
    project_src: "{{docker_compose_files_folder}}/plex"
    state: present

View file

@@ -0,0 +1,8 @@
COMPOSE_PROJECT_NAME=plex
#Domains
PLEX_DOMAIN=plex.banditlair.com
#Plex
PLEX_USERNAME=banditlair
PLEX_PASSWORD={{plex_account_password}}

View file

@@ -0,0 +1,6 @@
COMPOSE_PROJECT_NAME=sonar
SONAR_DOMAIN=sonar.banditlair.com
#Letsencrypt
LETSENCRYPT_EMAIL=banditlair@outlook.com

View file

@@ -0,0 +1,43 @@
version: '2.2'

networks:
  proxy-tier:
    external:
      name: nginx-proxy
  sonarnet:
    driver: bridge

services:
  sonarqube:
    image: sonarqube
    expose:
      - 9000
    environment:
      - SONARQUBE_JDBC_URL=jdbc:postgresql://db:5432/sonar
      - VIRTUAL_HOST=${SONAR_DOMAIN}
      - VIRTUAL_NETWORK=nginx-proxy
      - VIRTUAL_PORT=9000
      # - LETSENCRYPT_HOST=${SONAR_DOMAIN}
      # - LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL}
    volumes:
      - ./sonarqube:/opt/sonarqube/conf
      - /var/lib/sonarqube/data:/opt/sonarqube/data
      - /var/lib/sonarqube/extensions:/opt/sonarqube/extensions
      - /var/lib/sonarqube/lib/bundled-plugins:/opt/sonarqube/lib/bundled-plugins
    links:
      - db
    networks:
      - sonarnet
      - proxy-tier
    restart: unless-stopped

  db:
    image: postgres
    environment:
      - POSTGRES_USER=sonar
      - POSTGRES_PASSWORD=sonar
    volumes:
      - /var/lib/sonarqube/db:/var/lib/postgresql
    networks:
      - sonarnet
    restart: unless-stopped