Merge remote-tracking branch 'gum/master' into HEAD

lassulus 2020-09-15 20:16:44 +02:00
commit 624deaddd0
16 changed files with 202 additions and 141 deletions

View File

@@ -18,6 +18,7 @@
<stockholm/krebs/2configs/shack/prometheus/server.nix>
<stockholm/krebs/2configs/shack/prometheus/blackbox.nix>
<stockholm/krebs/2configs/shack/prometheus/unifi.nix>
<stockholm/krebs/2configs/shack/prometheus/alertmanager-telegram.nix>
<stockholm/krebs/2configs/shack/gitlab-runner.nix>
## Collect local statistics via collectd and send them to the collectd server
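
The collectd receiver referenced by this comment is not part of the diff; a minimal sketch of what the sending side can look like, assuming a hypothetical stats.shack receiver (25826 is collectd's default network port):

services.collectd = {
  enable = true;
  extraConfig = ''
    LoadPlugin network
    <Plugin "network">
      # stats.shack is a hypothetical receiver, not taken from this commit
      Server "stats.shack" "25826"
    </Plugin>
  '';
};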

View File

@@ -34,6 +34,22 @@ in
# powerraw usb serial to mqtt and raw socket
<stockholm/krebs/2configs/shack/powerraw.nix>
{ # do not log to /var/spool/log
services.nginx.appendHttpConfig = ''
map $request_method $loggable {
default 1;
GET 0;
}
log_format vhost '$host $remote_addr - $remote_user '
'[$time_local] "$request" $status '
'$body_bytes_sent "$http_referer" '
'"$http_user_agent"';
error_log stderr;
access_log syslog:server=unix:/dev/log vhost;
'';
services.journald.rateLimitBurst = 10000;
}
# create samba share for anonymous usage with the laser and 3d printer pc
<stockholm/krebs/2configs/shack/share.nix>
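
The $loggable map above is what the vhosts further down in this commit (influx.shack, openhab.shack) reference to silence GET request logging; a minimal sketch of the consuming side, with example.shack as a made-up vhost name:

services.nginx.virtualHosts."example.shack" = {
  extraConfig = ''
    # $loggable is 0 for GET requests, so they are skipped
    access_log syslog:server=unix:/dev/log combined if=$loggable;
  '';
};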

View File

@@ -62,13 +62,14 @@ in {
];
};
# https://www.home-assistant.io/components/influxdb/
#influxdb = {
# database = "hass";
# tags = {
# instance = "wolf";
# source = "hass";
# };
#};
influxdb = {
database = "glados";
host = "influx.shack";
tags = {
instance = "wolf";
source = "glados";
};
};
esphome = {};
api = {};
mqtt = {

View File

@@ -2,7 +2,7 @@
# switch.crafting_giesskanne_relay
let
glados = import ../lib;
seconds = 10;
seconds = 20;
wasser = "switch.crafting_giesskanne_relay";
in
{

View File

@@ -8,6 +8,11 @@ in
networking.firewall.allowedTCPPorts = [ port ]; # for legacy applications
networking.firewall.allowedUDPPorts = [ collectd-port ];
services.nginx.virtualHosts."influx.shack" = {
# Disable constant GET request logging.
# $loggable map is defined in 1/wolf
extraConfig = ''
access_log syslog:server=unix:/dev/log combined if=$loggable;
'';
locations."/" = {
proxyPass = "http://localhost:${toString port}/";
};

View File

@@ -28,6 +28,9 @@ in {
};
services.nginx.virtualHosts."openhab.shack" = {
extraConfig = ''
access_log syslog:server=unix:/dev/log combined if=$loggable;
'';
serverAliases = [ "lightapi.shack" ];
locations."/power/".proxyPass = "http://localhost:${port}/power/";
locations."/lounge/".proxyPass = "http://localhost:${port}/lounge/";

View File

@@ -1,102 +1,42 @@
{ lib }:
with lib;
{ lib,... }:
let
deviceFilter = ''device!="ramfs",device!="rpc_pipefs",device!="lxcfs",device!="nsfs",device!="borgfs"'';
in mapAttrsToList (name: opts: {
alert = name;
expr = opts.condition;
for = opts.time or "2m";
labels = if (opts.page or true) then { severity = "page"; } else {};
annotations = {
summary = opts.summary;
description = opts.description;
};
}) {
node_down = {
condition = ''up{job="node"} == 0'';
summary = "{{$labels.alias}}: Node is down.";
description = "{{$labels.alias}} has been down for more than 2 minutes.";
};
node_systemd_service_failed = {
condition = ''node_systemd_unit_state{state="failed"} == 1'';
summary = "{{$labels.alias}}: Service {{$labels.name}} failed to start.";
description = "{{$labels.alias}} failed to (re)start service {{$labels.name}}.";
};
node_filesystem_full_80percent = {
condition = ''sort(node_filesystem_free_bytes{${deviceFilter}} < node_filesystem_size_bytes{${deviceFilter}} * 0.2) / 1024^3'';
time = "10m";
summary = "{{$labels.alias}}: Filesystem is running out of space soon.";
description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 20% space left on its filesystem.";
};
node_filesystem_full_in_7d = {
condition = ''predict_linear(node_filesystem_free_bytes{${deviceFilter}}[2d], 7*24*3600) <= 0'';
time = "1h";
summary = "{{$labels.alias}}: Filesystem is running out of space in 7 days.";
description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space of in approx. 7 days";
};
node_filesystem_full_in_30d = {
condition = ''predict_linear(node_filesystem_free_bytes{${deviceFilter}}[30d], 30*24*3600) <= 0'';
time = "1h";
summary = "{{$labels.alias}}: Filesystem is running out of space in 30 days.";
description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space of in approx. 30 days";
};
node_filedescriptors_full_in_3h = {
condition = ''predict_linear(node_filefd_allocated[3h], 3*3600) >= node_filefd_maximum'';
time = "20m";
summary = "{{$labels.alias}} is running out of available file descriptors in 3 hours.";
description = "{{$labels.alias}} is running out of available file descriptors in approx. 3 hours";
};
node_filedescriptors_full_in_7d = {
condition = ''predict_linear(node_filefd_allocated[7d], 7*24*3600) >= node_filefd_maximum'';
time = "1h";
summary = "{{$labels.alias}} is running out of available file descriptors in 7 days.";
description = "{{$labels.alias}} is running out of available file descriptors in approx. 7 days";
};
node_load15 = {
condition = ''node_load15 / on(alias) count(node_cpu_seconds_total{mode="system"}) by (alias) >= 1.0'';
time = "10m";
summary = "{{$labels.alias}}: Running on high load: {{$value}}";
description = "{{$labels.alias}} is running with load15 > 1 for at least 5 minutes: {{$value}}";
};
node_ram_using_90percent = {
condition = "node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes < node_memory_MemTotal_bytes * 0.1";
time = "1h";
summary = "{{$labels.alias}}: Using lots of RAM.";
description = "{{$labels.alias}} is using at least 90% of its RAM for at least 1 hour.";
};
node_swap_using_30percent = {
condition = "node_memory_SwapTotal_bytes - (node_memory_SwapFree_bytes + node_memory_SwapCached_bytes) > node_memory_SwapTotal_bytes * 0.3";
time = "30m";
summary = "{{$labels.alias}}: Using more than 30% of its swap.";
description = "{{$labels.alias}} is using 30% of its swap space for at least 30 minutes.";
};
node_visible_confluence_space = {
condition = "node_visible_confluence_space != 0";
summary = "crowd prometheus cann see the {{$labels.space_name}} confluence space!";
description = "crowd user `prometheus` can see the `{{$labels.space_name}}` confluence space.";
};
node_hwmon_temp = {
condition = "node_hwmon_temp_celsius > node_hwmon_temp_crit_celsius*0.9 OR node_hwmon_temp_celsius > node_hwmon_temp_max_celsius*0.95";
time = "5m";
summary = "{{$labels.alias}}: Sensor {{$labels.sensor}}/{{$labels.chip}} temp is high: {{$value}} ";
description = "{{$labels.alias}} reports hwmon sensor {{$labels.sensor}}/{{$labels.chip}} temperature value is nearly critical: {{$value}}";
};
node_conntrack_limit = {
condition = "node_nf_conntrack_entries_limit - node_nf_conntrack_entries < 1000";
time = "5m";
summary = "{{$labels.alias}}: Number of tracked connections high";
description = "{{$labels.alias}} has only {{$value}} free slots for connection tracking available.";
};
node_reboot = {
condition = "time() - node_boot_time_seconds < 300";
summary = "{{$labels.alias}}: Reboot";
description = "{{$labels.alias}} just rebooted.";
};
node_uptime = {
condition = "time() - node_boot_time_seconds > 2592000";
page = false;
summary = "{{$labels.alias}}: Uptime monster";
description = "{{$labels.alias}} has been up for more than 30 days.";
};
disk_free_threshold = "10"; # at least this much free disk percentage
in {
services.prometheus.rules = [(builtins.toJSON
{
groups = [
{ name = "shack-env";
rules = [
{
alert = "RootPartitionFull";
for = "30m";
expr = ''(node_filesystem_avail_bytes{alias="wolf",mountpoint="/"} * 100) / node_filesystem_size_bytes{alias="wolf",mountpoint="/"} < ${disk_free_threshold}'';
labels.severity = "warning";
annotations.summary = "{{ $labels.alias }} root disk full";
annotations.url = "http://grafana.shack/";
annotations.description = ''The root disk of {{ $labels.alias }} has {{ $value | printf "%.2f" }}% free disk space (threshold at ${disk_free_threshold}%). A vast number of shackspace services will stop working. CI for deploying new configuration will also cease working. Log in to the system, run `nix-collect-garbage -d` and clean up the shack share folder in `/home/share`. If this does not help, check `du -hs /var/ | sort -h`, run `docker system prune`, or, if you are really desperate, run `du -hs / | sort -h` and go through the folders recursively until you've found something to delete.'';
}
{
alert = "RootPartitionFull";
for = "30m";
expr = ''(node_filesystem_avail_bytes{alias="puyak",mountpoint="/"} * 100) / node_filesystem_size_bytes{alias="puyak",mountpoint="/"} < ${disk_free_threshold}'';
labels.severity = "warning";
annotations.summary = "{{ $labels.alias }} root disk full";
annotations.url = "http://grafana.shack/";
annotations.description = ''The root disk of {{ $labels.alias }} has {{ $value | printf "%.2f" }}% free disk space (threshold at ${disk_free_threshold}%). Prometheus will not be able to create new alerts, and CI for deploying new configuration will also cease working. Log in to the system and run `nix-collect-garbage -d`. If this does not help, check `du -hs /var/ | sort -h`, run `docker system prune`, or, if you are really desperate, run `du -hs / | sort -h` and go through the folders recursively until you've found something to delete.'';
}
{
alert = "HostDown";
expr = ''up{alias="wolf"} == 0'';
for = "5m";
labels.severity = "page";
annotations.summary = "Instance {{ $labels.alias }} down for 5 minutes";
annotations.url = "http://grafana.shack/";
annotations.description = ''Host {{ $labels.alias }} went down and has not come back after 5 minutes. This is probably bad news; try to restart the host via naproxen (http://naproxen.shack:8006). Wolf being down means that CI, glados automation, light management and a couple of other services will not work anymore.'';
}
];
}
];
}
)];
}
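
Since builtins.toJSON emits JSON and JSON is a subset of YAML, Prometheus loads the result above directly as a rule file; a sketch of roughly how the HostDown rule renders (annotations omitted for brevity):

{
  "groups": [
    {
      "name": "shack-env",
      "rules": [
        {
          "alert": "HostDown",
          "expr": "up{alias=\"wolf\"} == 0",
          "for": "5m",
          "labels": { "severity": "page" }
        }
      ]
    }
  ]
}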

View File

@@ -0,0 +1,17 @@
{ pkgs, ...}:
{
systemd.services.alertmanager-bot-telegram = {
wantedBy = [ "multi-user.target" ];
after = [ "ip-up.target" ];
serviceConfig = {
EnvironmentFile = toString <secrets/shack/telegram_bot.env>;
DynamicUser = true;
StateDirectory = "alertbot";
ExecStart = ''${pkgs.alertmanager-bot-telegram}/bin/alertmanager-bot \
--alertmanager.url=http://alert.prometheus.shack --log.level=info \
--store=bolt --bolt.path=/var/lib/alertbot/bot.db \
--listen.addr="0.0.0.0:16320" \
--template.paths=${pkgs.alertmanager-bot-telegram}/templates/default.tmpl'';
};
};
}
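
The listen.addr above is the other half of the webhook receiver that the Alertmanager configuration in prometheus/server.nix (further down in this commit) points at:

"webhook_configs" = [
  {
    "url" = "http://localhost:16320";
    "send_resolved" = true;
  }
];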

View File

@@ -1,6 +1,9 @@
{ pkgs, lib, config, ... }:
# from https://gist.github.com/globin/02496fd10a96a36f092a8e7ea0e6c7dd
{
imports = [
./alert-rules.nix
];
networking = {
firewall.allowedTCPPorts = [
9090 # prometheus
@@ -18,12 +21,6 @@
};
prometheus = {
enable = true;
ruleFiles = lib.singleton (pkgs.writeText "prometheus-rules.yml" (builtins.toJSON {
groups = lib.singleton {
name = "mf-alerting-rules";
rules = import ./alert-rules.nix { inherit lib; };
};
}));
scrapeConfigs = [
{
job_name = "node";
@@ -118,7 +115,10 @@
];
alertmanager = {
enable = true;
listenAddress = "0.0.0.0";
listenAddress = "127.0.0.1";
webExternalUrl = "http://alert.prometheus.shack";
logLevel = "debug";
configuration = {
"global" = {
"smtp_smarthost" = "smtp.example.com:587";
@@ -134,15 +134,10 @@
"receivers" = [
{
"name" = "team-admins";
"email_configs" = [
{
"to" = "devnull@example.com";
"send_resolved" = true;
}
];
"email_configs" = [ ];
"webhook_configs" = [
{
"url" = "https://example.com/prometheus-alerts";
"url" = "http://localhost:16320";
"send_resolved" = true;
}
];
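
With listenAddress moved to 127.0.0.1 and webExternalUrl set to http://alert.prometheus.shack, external access presumably goes through a reverse proxy; a minimal sketch of such a vhost (hypothetical, not part of this diff; 9093 is Alertmanager's default port):

services.nginx.virtualHosts."alert.prometheus.shack" = {
  locations."/".proxyPass = "http://127.0.0.1:9093/";
};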

View File

@@ -26,7 +26,8 @@ in {
# <stockholm/makefu/2configs/audio/jack-on-pulse.nix>
# <stockholm/makefu/2configs/audio/realtime-audio.nix>
# <stockholm/makefu/2configs/vncserver.nix>
<stockholm/makefu/2configs/logging/server.nix>
## no need for dns logs anymore
# <stockholm/makefu/2configs/logging/server.nix>
# Services
# <stockholm/makefu/2configs/hydra/stockholm.nix>
@@ -48,6 +49,7 @@ in {
<stockholm/makefu/2configs/bureautomation> # new hass entry point
<stockholm/makefu/2configs/bureautomation/led-fader.nix>
<stockholm/makefu/2configs/bureautomation/kalauerbot.nix>
# <stockholm/makefu/2configs/bureautomation/visitor-photostore.nix>
# <stockholm/makefu/2configs/bureautomation/mpd.nix> #mpd is only used for TTS, this is the web interface
<stockholm/makefu/2configs/mqtt.nix>

View File

@@ -1,15 +1,20 @@
# BGT<NUMBER>
1. press record in studio-link (important)
- markus 6407eb63@studio-link.de
- Felix1 1f1021b2@studio-link.de
- L33tFelix 4d47a82a@studio-link.de
- Ingo 03b33b4a@studio-link.de
1. press record in studio-link, check that the file is growing (important)
- markus 6407eb63@studio-link.de
- Felix1 1f1021b2@studio-link.de
- L33tFelix 842f85eb@studio-link.de
- Ingo 03b33b4a@studio-link.de
2. start audacity, set it to 48000 Hz, check the audio device and press record (important)
* alternatively:
`$ pacmd list-sources | grep -e device.string -e 'name:' # none of the "monitor" devices`
`$ parecord --channels=1 -d alsa_input.usb-Burr-Brown_from_TI_USB_Audio_CODEC-00.analog-stereo bgt.wav`
3. start obs and press record (rather unimportant)
4. start darkice (important)
4. clap
5. say "Hallo und herzlich Willkommen"
5. start darkice (important)
6. clap
7. say "Hallihallo und Herzlich Willkommen"
8. start chapter-marker with ctrl-u on the "H" of "Halli" (important)
9. select the Blast and press ctrl-j for a new entry
## Suggestions
### Backlog of picks and Lesefoo from last week
@@ -31,3 +36,5 @@
## Lesefoo
## Picks
## End

View File

@@ -13,8 +13,7 @@
gi
flashrom
mosquitto
nodemcu-uploader
esptool
esphome
# nix related
nix-index
nix-review

View File

@@ -376,6 +376,12 @@ globalkeys = awful.util.table.join(
awful.key({ }, "XF86AudioMute", function ()
awful.util.spawn("@alsaUtils@/bin/amixer -q -D default sset Master toggle", false) end),
-- chapter-marker
awful.key({ "Control" }, "u", function () awful.spawn("@chaptermarker@/bin/chapter-start") end,
{description = "start the chapter marker",}),
awful.key({ "Control" }, "j", function () awful.spawn("@chaptermarker@/bin/chapter-mark") end,
{description = "create a chapter mark",}),
-- Prompt
awful.key({ modkey }, "r", function () awful.screen.focused().mypromptbox:run() end,
{description = "run prompt", group = "launcher"}),
@@ -492,9 +498,16 @@ awful.rules.rules = {
properties = { floating = true } },
--{ rule = { class = "gimp" },
-- properties = { floating = true } },
-- Set Firefox to always map on tags number 2 of screen 1.
-- { rule = { class = "Firefox" },
-- properties = { tag = tags[1][2] } },
{ rule = { class = "Firefox" },
properties = { tag = tags[3] } },
{ rule = { class = "signal-desktop" },
properties = { tag = tags[4] } },
{ rule = { class = "telegram-desktop" },
properties = { tag = tags[4] } },
{ rule = { class = "mutt" },
properties = { tag = tags[5] } },
{ rule = { class = "mosh" },
properties = { tag = tags[2] } },
}
-- }}}
@@ -569,7 +582,7 @@ local os = {
-- {{{ autostart
do
awful.spawn("urxvt", { tag = tags[1] }) -- dev shell
-- awful.spawn("urxvt", { tag = tags[1] }) -- dev shell
awful.spawn("urxvt -e mosh makefu@gum.i", { tag = tags[2] })
awful.spawn("firefox", { tag = tags[3] })
awful.spawn("telegram-desktop", { tag = tags[4] })

View File

@@ -1,4 +1,4 @@
{ coreutils, fetchFromGitHub, makeWrapper, xdotool, stdenv, ... }:
{ coreutils, fetchFromGitHub, makeWrapper, xclip, libnotify, stdenv, ... }:
stdenv.mkDerivation rec {
name = "chapter-marker-${version}";
@@ -6,8 +6,8 @@ stdenv.mkDerivation rec {
src = fetchFromGitHub {
owner = "makefu";
repo = "chapter-marker";
rev = "7602b611fb3d67fdb8a86db23220074dfa9dfa1e";
sha256 = "0cwh650c3qhdrcvrqfzgrwpsnj4lbq64fw2sfwvnbxz94b4q36av";
rev = "71b9bb8bc4d6fa87de6bea8f42d5486d05cf5443";
sha256 = "13cvk24pwwyv9i21h57690s5niwkcrcvn8l24zfxwbgq0wwzw38x";
};
buildInputs = [ makeWrapper ];
@@ -16,7 +16,8 @@ stdenv.mkDerivation rec {
let
path = stdenv.lib.makeBinPath [
coreutils
xdotool
libnotify
xclip
];
in
''

View File

@@ -0,0 +1,3 @@
{ pkgs,... }:
# TODO: dependencies: coreutils, nx_game_info,
pkgs.writeScriptBin "nsrenamer" (builtins.readFile ./nsrenamer.sh)
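
The TODO above leaves the script's dependencies unpinned; one possible way to put them on PATH with makeWrapper — a sketch only, where pkgs.nx_game_info (for nxgameinfo_cli) is a hypothetical attribute:

{ pkgs, ... }:
pkgs.runCommand "nsrenamer" { nativeBuildInputs = [ pkgs.makeWrapper ]; } ''
  install -Dm755 ${./nsrenamer.sh} $out/bin/nsrenamer
  # coreutils/gawk/findutils/gnused/jq cover dirname, awk, xargs, sed and jq
  # as used by the script; pkgs.nx_game_info is hypothetical and would have
  # to provide nxgameinfo_cli
  wrapProgram $out/bin/nsrenamer \
    --prefix PATH : ${pkgs.lib.makeBinPath [ pkgs.coreutils pkgs.gawk pkgs.findutils pkgs.gnused pkgs.jq pkgs.nx_game_info ]}
''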

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
set -euf
indir=$(dirname "$1")
inname=$(basename "$1")
out=$(nxgameinfo_cli "$1")
ext=${1##*.}
id=$(awk -F: '/├ Title ID:/{print $2}' <<<"$out" |xargs)
baseid=$(awk -F: '/Base Title ID:/{print $2}' <<<"$out" |xargs)
version=$(awk -F: '/├ Version:/{print $2}' <<<"$out" |xargs)
name=$(awk -F: '/Title Name/{print $2}' <<<"$out" | sed "s/[:']//g" | xargs )
type=$(awk -F: '/Type:/{print $2}' <<<"$out" | xargs)
! test -n "$id" && echo "Title ID cannot be empty!" && exit 1
! test -n "$type" && echo "type cannot be empty!" && exit 1
if test "$type" == Base;then
! test -n "$name" && echo "Title Name cannot be empty!" && exit 1
NAME="$name [$id][v$version].$ext"
elif test "$type" == Update;then
! test -n "$name" && echo "Title Name cannot be empty!" && exit 1
! test -n "$version" && echo "Version cannot be empty!" && exit 1
NAME="$name [UPD][$id][v$version].$ext"
elif test "$type" == DLC;then
# "// empty" keeps $dlcname empty (instead of the string "null") when the id is unknown
dlcname=$(jq -r --arg id "$id" '.[$id].name // empty' < ~/.switch/titles.US.en.json | sed "s/[:']//g")
if test -n "$dlcname" ;then
NAME="$dlcname [DLC][$id][v$version].$ext"
else
! test -n "$name" && echo "dlcname cannot be found in titles.US.en.json and $name is empty!" && exit 1
NAME="$dlcname [DLC][$id][v$version].$ext"
fi
else
echo "unknown type '$type'"
exit 1
fi
newname=$indir/$NAME
if test "$NAME" == "${inname}";then
echo "name didn't change,doing nothing"
exit 0
fi
if test -e "$newname" ;then
echo "'$NAME' already exists, will not override"
exit 1
fi
if test -n "${FORCE:-}" ;then
CONFIRM=y
else
read -p "rename '$inname' to '$NAME' - [y/N]" CONFIRM
fi
if test -n "${FORCE:-}" -o "$CONFIRM" == "y" -o "$CONFIRM" == "Y";then
mv -nv "$1" "$newname"
else
echo "bailing out"
exit 1
fi
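
Usage, following the FORCE convention implemented above (the file name is a made-up example):
`$ nsrenamer Some.Game.nsp # asks for confirmation before renaming`
`$ FORCE=1 nsrenamer Some.Game.nsp # renames without asking`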