l: add monitoring config

parent 93fd2821b3
commit 9cbce7b82f

lass/2configs/monitoring/alert-rules.nix (new file)
@@ -0,0 +1,208 @@
# inspiration from https://github.com/Mic92/dotfiles/blob/master/nixos/eva/modules/prometheus/alert-rules.nix
{ lib }:

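# Each attribute below becomes one Prometheus alerting rule: the attribute
# name is used as the alert name, `condition` as the PromQL expression, and
# `time` as the `for:` duration (defaulting to "2m" when unset).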
lib.mapAttrsToList
  (name: opts: {
    alert = name;
    expr = opts.condition;
    for = opts.time or "2m";
    labels = { };
    annotations.description = opts.description;
  })
  ({
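    # e.g. the first entry below renders (roughly) to:
    #   - alert: prometheus_too_many_restarts
    #     expr: changes(process_start_time_seconds{...}[15m]) > 2
    #     for: 2m
    #     annotations:
    #       description: Prometheus has restarted more than twice ...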
    prometheus_too_many_restarts = {
      condition = ''changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager|telegraf"}[15m]) > 2'';
      description = "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.";
    };

    alert_manager_config_not_synced = {
      condition = ''count(count_values("config_hash", alertmanager_config_hash)) > 1'';
      description = "Configurations of AlertManager cluster instances are out of sync.";
    };

    prometheus_not_connected_to_alertmanager = {
      condition = "prometheus_notifications_alertmanagers_discovered < 1";
      description = "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
    };

    prometheus_rule_evaluation_failures = {
      condition = "increase(prometheus_rule_evaluation_failures_total[3m]) > 0";
      description = "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
    };

    prometheus_template_expansion_failures = {
      condition = "increase(prometheus_template_text_expansion_failures_total[3m]) > 0";
      time = "0m";
      description = "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
    };

    promtail_request_errors = {
      condition = ''100 * sum(rate(promtail_request_duration_seconds_count{status_code=~"5..|failed"}[1m])) by (namespace, job, route, instance) / sum(rate(promtail_request_duration_seconds_count[1m])) by (namespace, job, route, instance) > 10'';
      time = "15m";
      description = ''{{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors.'';
    };

    promtail_file_lagging = {
      condition = ''abs(promtail_file_bytes_total - promtail_read_bytes_total) > 1e6'';
      time = "15m";
      description = ''{{ $labels.instance }} {{ $labels.job }} {{ $labels.path }} has been lagging by more than 1MB for more than 15m.'';
    };

    filesystem_full_80percent = {
      condition = ''disk_used_percent{mode!="ro"} >= 80'';
      time = "10m";
      description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.path}} got less than 20% space left on its filesystem.";
    };

    filesystem_full_krebs = {
      condition = ''disk_used_percent{mode!="ro", org="krebs"} >= 95'';
      time = "10m";
      description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.path}} got less than 5% space left on its filesystem.";
    };

    filesystem_inodes_full = {
      condition = ''disk_inodes_free / disk_inodes_total < 0.10'';
      time = "10m";
      description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.path}} got less than 10% inodes left on its filesystem.";
    };

    daily_task_not_run = {
      # give 6 hours grace period
      condition = ''time() - task_last_run{state="ok",frequency="daily"} > (24 + 6) * 60 * 60'';
      description = "{{$labels.host}}: {{$labels.name}} was not run in the last 24h";
    };

    daily_task_failed = {
      condition = ''task_last_run{state="fail"}'';
      description = "{{$labels.host}}: {{$labels.name}} failed to run";
    };

    swap_using_30percent = {
      condition = "mem_swap_total - (mem_swap_cached + mem_swap_free) > mem_swap_total * 0.3";
      time = "30m";
      description = "{{$labels.host}} is using 30% of its swap space for at least 30 minutes.";
    };

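    # active_code 3 is the "failed" state in telegraf's systemd_units input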
    systemd_service_failed = {
      condition = ''systemd_units_active_code{name!~"nixpkgs-update-.*.service"} == 3'';
      description = "{{$labels.host}} failed to (re)start service {{$labels.name}}.";
    };

    service_not_running = {
      condition = ''systemd_units_active_code{name=~"teamspeak3-server.service|tt-rss.service", sub!="running"}'';
      description = "{{$labels.host}} should have a running {{$labels.name}}.";
    };

    nfs_export_not_present = {
      condition = "nfs_export_present == 0";
      time = "1h";
      description = "{{$labels.host}} cannot reach nfs export [{{$labels.server}}]:{{$labels.path}}";
    };

    ram_using_90percent = {
      condition = "mem_buffered + mem_free + mem_cached < mem_total * 0.1";
      time = "1h";
      description = "{{$labels.host}} is using at least 90% of its RAM for at least 1 hour.";
    };
    load15 = {
      condition = ''system_load15 / system_n_cpus{org!="nix-community"} >= 2.0'';
      time = "10m";
      description = "{{$labels.host}} is running with a per-core load15 >= 2 for at least 10 minutes: {{$value}}";
    };
    reboot = {
      condition = "system_uptime < 300";
      description = "{{$labels.host}} just rebooted.";
    };
    uptime = {
      # too scared to upgrade matchbox
      condition = ''system_uptime{host!~"^(matchbox|grandalf)$"} > 2592000'';
      description = "Uptime monster: {{$labels.host}} has been up for more than 30 days.";
    };
    telegraf_down = {
      condition = ''min(up{job=~"telegraf",type!='mobile'}) by (source, job, instance, org) == 0'';
      time = "3m";
      description = "{{$labels.instance}}: {{$labels.job}} telegraf exporter from {{$labels.source}} is down.";
    };
    ping = {
      condition = "ping_result_code{type!='mobile'} != 0";
      description = "{{$labels.url}}: ping from {{$labels.instance}} has failed!";
    };
    ping_high_latency = {
      condition = "ping_average_response_ms{type!='mobile'} > 5000";
      description = "{{$labels.instance}}: ping probe from {{$labels.source}} is encountering high latency!";
    };
    http = {
      condition = "http_response_result_code != 0";
      description = "{{$labels.server}}: http request failed from {{$labels.instance}}: {{$labels.result}}!";
    };
    http_match_failed = {
      condition = "http_response_response_string_match == 0";
      description = "{{$labels.server}}: http body not as expected; status code: {{$labels.status_code}}!";
    };
    dns_query = {
      condition = "dns_query_result_code != 0";
      description = "{{$labels.domain}}: could not retrieve A record {{$labels.instance}} from server {{$labels.server}}: {{$labels.result}}!";
    };
    secure_dns_query = {
      condition = "secure_dns_state != 0";
      description = "{{$labels.domain}}: could not retrieve A record {{$labels.instance}} from server {{$labels.server}}: {{$labels.result}} for protocol {{$labels.protocol}}!";
    };
    connection_failed = {
      condition = "net_response_result_code != 0";
      description = "{{$labels.server}}: connection to {{$labels.port}}({{$labels.protocol}}) failed from {{$labels.instance}}";
    };
    healthchecks = {
      condition = "hc_check_up == 0";
      description = "{{$labels.instance}}: healthcheck {{$labels.job}} fails!";
    };
    cert_expiry = {
      condition = "x509_cert_expiry < 7*24*3600";
      description = "{{$labels.instance}}: The TLS certificate from {{$labels.source}} will expire in less than 7 days: {{$value}}s";
    };

    postfix_queue_length = {
      condition = "avg_over_time(postfix_queue_length[1h]) > 10";
      description = "{{$labels.instance}}: postfix mail queue has {{$value}} undelivered items";
    };

    zfs_errors = {
      condition = "zfs_arcstats_l2_io_error + zfs_dmu_tx_error + zfs_arcstats_l2_writes_error > 0";
      description = "{{$labels.instance}} reports {{$value}} ZFS IO errors.";
    };

    # ignore devices that have S.M.A.R.T. disabled (e.g. if attached via USB)
    smart_errors = {
      condition = ''smart_device_health_ok{enabled!="Disabled"} != 1'';
      description = "{{$labels.instance}}: S.M.A.R.T reports: {{$labels.device}} ({{$labels.model}}) has errors.";
    };

    oom_kills = {
      condition = "increase(kernel_vmstat_oom_kill[5m]) > 0";
      description = "{{$labels.instance}}: OOM kill detected";
    };

    unusual_disk_read_latency = {
      condition = "rate(diskio_read_time[1m]) / rate(diskio_reads[1m]) > 0.1 and rate(diskio_reads[1m]) > 0";
      description = "{{$labels.instance}}: Disk latency is growing (read operations > 100ms)\n";
    };

    unusual_disk_write_latency = {
      condition = "rate(diskio_write_time[1m]) / rate(diskio_writes[1m]) > 0.1 and rate(diskio_writes[1m]) > 0";
      description = "{{$labels.instance}}: Disk latency is growing (write operations > 100ms)\n";
    };

    host_memory_under_memory_pressure = {
      condition = "rate(kernel_vmstat_pgmajfault[1m]) > 1000";
      description = "{{$labels.instance}}: The node is under heavy memory pressure. High rate of major page faults: {{$value}}";
    };

    ext4_errors = {
      condition = "ext4_errors_value > 0";
      description = "{{$labels.instance}}: ext4 has reported {{$value}} I/O errors: check /sys/fs/ext4/*/errors_count";
    };

    alerts_silences_changed = {
      condition = ''abs(delta(alertmanager_silences{state="active"}[1h])) >= 1'';
      description = "alertmanager: number of active silences has changed: {{$value}}";
    };
  })

lass/2configs/monitoring/prometheus.nix (new file)
@@ -0,0 +1,110 @@
{ config, lib, pkgs, ... }:
{
  # prometheus
  krebs.iptables = {
    enable = true;
    tables.filter.INPUT.rules = [
      { predicate = "-i retiolum -p tcp --dport 80"; target = "ACCEPT"; } # nginx
      # { predicate = "-i retiolum -p tcp --dport 3012"; target = "ACCEPT"; } # grafana
      # { predicate = "-i retiolum -p tcp --dport 9093"; target = "ACCEPT"; } # alertmanager
      # { predicate = "-i retiolum -p tcp --dport 9223"; target = "ACCEPT"; } # alertmanager
    ];
  };

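  # nginx reverse-proxies prometheus, alertmanager and grafana on the
  # *.lass.r virtual hosts reachable over retiolum (port 80 opened above)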
  services.nginx = {
    enable = true;
    virtualHosts = {
      "prometheus.lass.r" = {
        locations."/".proxyPass = "http://localhost:9090";
      };
      "alert.lass.r" = {
        locations."/".proxyPass = "http://localhost:9093";
      };
      "grafana.lass.r" = {
        locations."/".proxyPass = "http://localhost:3012";
      };
    };
  };

  services.grafana = {
    enable = true;
    addr = "0.0.0.0";
    port = 3012;
    auth.anonymous = {
      enable = true;
      org_role = "Admin";
    };
  };
  services.prometheus = {
    enable = true;
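    # alert-rules.nix is rendered to JSON here; Prometheus accepts it because
    # every JSON document is also valid YAML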
    ruleFiles = [
      (pkgs.writeText "prometheus-rules.yml" (builtins.toJSON {
        groups = [{
          name = "alerting-rules";
          rules = import ./alert-rules.nix { inherit lib; };
        }];
      }))
    ];
    scrapeConfigs = [
      {
        job_name = "telegraf";
        scrape_interval = "60s";
        metrics_path = "/metrics";
        static_configs = [
          {
            targets = [
              "prism.r:9273"
              "dishfire.r:9273"
              "yellow.r:9273"
            ];
          }
        ];
      }
    ];
    alertmanagers = [
      { scheme = "http";
        path_prefix = "/";
        static_configs = [ { targets = [ "localhost:9093" ]; } ];
      }
    ];
    alertmanager = {
      enable = true;
      webExternalUrl = "https://alert.lass.r";
      listenAddress = "[::1]";
      configuration = {
        global = {
          # The smarthost and SMTP sender used for mail notifications.
          smtp_smarthost = "localhost:587";
          smtp_from = "alertmanager@alert.lass.r";
          # smtp_auth_username = "alertmanager@thalheim.io";
          # smtp_auth_password = "$SMTP_PASSWORD";
        };
        route = {
          receiver = "default";
          routes = [
            {
              group_by = [ "host" ];
              group_wait = "30s";
              group_interval = "2m";
              repeat_interval = "2h";
              receiver = "all";
            }
          ];
        };
        receivers = [
          {
            name = "all";
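            # push alerts to a local webhook consumer on 127.0.0.1:9223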
            webhook_configs = [{
              url = "http://127.0.0.1:9223/";
              max_alerts = 5;
            }];
          }
          {
            name = "default";
          }
        ];
      };
    };
  };

}

lass/2configs/monitoring/telegraf.nix (new file)
@@ -0,0 +1,72 @@
{ config, lib, pkgs, ... }:
let
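  # heuristic: hosts whose initrd loads xen/virtio modules are treated as VMs;
  # only referenced by the (commented-out) smart input below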
  isVM = lib.any (mod: mod == "xen-blkfront" || mod == "virtio_console") config.boot.initrd.kernelModules;
in {

  krebs.iptables.tables.filter.INPUT.rules = [
    { predicate = "-i retiolum -p tcp --dport 9273"; target = "ACCEPT"; }
  ];

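  # nvme-cli on telegraf's PATH, presumably for the (currently disabled) smart input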
  systemd.services.telegraf.path = [ pkgs.nvme-cli ];

  services.telegraf = {
    enable = true;
    extraConfig = {
      agent.interval = "60s";
      inputs = {
        http_response = [
          { urls = [
            "http://localhost:8080/about/health/"
          ]; }
        ];
        prometheus.metric_version = 2;
        kernel_vmstat = { };
        # smart = lib.mkIf (!isVM) {
        #   path = pkgs.writeShellScript "smartctl" ''
        #     exec /run/wrappers/bin/sudo ${pkgs.smartmontools}/bin/smartctl "$@"
        #   '';
        # };
        system = { };
        mem = { };
        file = [{
          data_format = "influx";
          file_tag = "name";
          files = [ "/var/log/telegraf/*" ];
        }] ++ lib.optional (lib.any (fs: fs == "ext4") config.boot.supportedFilesystems) {
          name_override = "ext4_errors";
          files = [ "/sys/fs/ext4/*/errors_count" ];
          data_format = "value";
        };
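        # parse `zpool status` into influx line protocol, roughly:
        #   zpool_status,name=tank state="ONLINE",errors=0i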
        exec = lib.optionalAttrs (lib.any (fs: fs == "zfs") config.boot.supportedFilesystems) {
          ## Commands array
          commands = [
            (pkgs.writeScript "zpool-health" ''
              #!${pkgs.gawk}/bin/awk -f
              BEGIN {
                while ("${pkgs.zfs}/bin/zpool status" | getline) {
                  if ($1 ~ /pool:/) { printf "zpool_status,name=%s ", $2 }
                  if ($1 ~ /state:/) { printf " state=\"%s\",", $2 }
                  if ($1 ~ /errors:/) {
                    if (index($2, "No")) printf "errors=0i\n"; else printf "errors=%di\n", $2
                  }
                }
              }
            '')
          ];
          data_format = "influx";
        };
        systemd_units = { };
        swap = { };
        disk.tagdrop = {
          fstype = [ "tmpfs" "ramfs" "devtmpfs" "devfs" "iso9660" "overlay" "aufs" "squashfs" ];
          device = [ "rpc_pipefs" "lxcfs" "nsfs" "borgfs" ];
        };
        diskio = { };
      };
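      # expose every collected metric on :9273, the port scraped by
      # prometheus.nix and opened in the iptables rule above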
      outputs.prometheus_client = {
        listen = ":9273";
        metric_version = 2;
      };
    };
  };
}