Add matrix, plex and deluge

parent 643ab3229a
commit b09ac86a62

24 changed files with 390 additions and 1 deletion
@@ -0,0 +1,9 @@
+ALERT service_down
+  IF up == 0
+
+ALERT high_load
+  IF node_load1 > 0.5
+  ANNOTATIONS {
+    summary = "Instance {{ $labels.instance }} under high load",
+    description = "{{ $labels.instance }} of job {{ $labels.job }} is under high load.",
+  }
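The rules above use the Prometheus 1.x ALERT statement syntax. Prometheus 2.x replaced it with YAML rule groups; a minimal sketch of the equivalent file follows, assuming a group name of 'self-hosting' (the group name is not in the original commit):

# Prometheus 2.x equivalent of the rules above; the group name is an assumption.
groups:
  - name: self-hosting
    rules:
      - alert: service_down
        expr: up == 0
      - alert: high_load
        expr: node_load1 > 0.5
        annotations:
          summary: "Instance {{ $labels.instance }} under high load"
          description: "{{ $labels.instance }} of job {{ $labels.job }} is under high load."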
@@ -0,0 +1,31 @@
+# my global config
+global:
+  scrape_interval: 15s # By default, scrape targets every 15 seconds.
+  evaluation_interval: 15s # By default, evaluate rules every 15 seconds.
+  # scrape_timeout is set to the global default (10s).
+
+  # Attach these labels to any time series or alerts when communicating with
+  # external systems (federation, remote storage, Alertmanager).
+  external_labels:
+    monitor: 'banditlair'
+
+# Load and evaluate rules in this file every 'evaluation_interval' seconds.
+rule_files:
+  - "alert.rules"
+  # - "first.rules"
+  # - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # Override the global default and scrape targets from this job every 5 seconds.
+    scrape_interval: 5s
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+      - targets: ['localhost:9090','cadvisor:8080','node-exporter:9100']
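Scraping the targets above only records the metrics; for the ALERT rules to actually notify anyone, Prometheus must also be pointed at an Alertmanager. On Prometheus 1.x this is done with the -alertmanager.url command-line flag; on 2.x it is declared in prometheus.yml. A sketch of the 2.x stanza, assuming an Alertmanager container reachable as 'alertmanager' on its default port 9093 (both the hostname and the stanza are assumptions, not part of this commit):

# Hypothetical Prometheus 2.x alerting stanza; 'alertmanager:9093' assumes a
# container on the same network listening on the default Alertmanager port.
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['alertmanager:9093']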