From 0db08b6881f7d9651ff80a33a74707f4e01a4272 Mon Sep 17 00:00:00 2001 From: zowoq <59103226+zowoq@users.noreply.github.com> Date: Fri, 11 Aug 2023 18:41:45 +1000 Subject: [PATCH] modules/nixos/monitoring: add alertmanager, matrix-alertmanager --- hosts/web02/secrets.yaml | 5 +- modules/nixos/monitoring/alert-rules.nix | 191 ++++++++++++++++++ modules/nixos/monitoring/default.nix | 1 + .../nixos/monitoring/matrix-alertmanager.nix | 46 +++++ modules/nixos/monitoring/prometheus.nix | 61 ++++++ secrets.yaml | 8 +- 6 files changed, 308 insertions(+), 4 deletions(-) create mode 100644 modules/nixos/monitoring/alert-rules.nix create mode 100644 modules/nixos/monitoring/matrix-alertmanager.nix diff --git a/hosts/web02/secrets.yaml b/hosts/web02/secrets.yaml index 8722ace..cc3bb63 100644 --- a/hosts/web02/secrets.yaml +++ b/hosts/web02/secrets.yaml @@ -1,4 +1,5 @@ ssh_host_ed25519_key: ENC[AES256_GCM,data:mp33XErF8FL7/rxKUsXiVijkCcDlSmtopkxTA/tDpcVx1ft6e9/YIoGYvLwxMhwowlEjktuANnZXo0tPHoCaiRdsFL2XMlNhbMHz5PKdIwpQyGysbqJKzVSa81F8RS1IG0sY8YGaCXsOfWPBbNJFTapT+9Xb6niRwiQkwwPAxBN9zdS+nMvllNHEZSv4RQ84uKzvyaLPB9N3GU7GHPq5UzBYNO1eH0eHoDEvv3JI6W/C8dws4liAwCSoCOF0RTdBpVKZvlf2DMYCKEZYWlnyBvq/NVJr2JujNDhtKOozL0tVLCkh8QQrLiWucN1yCMiy+1VS5yqmvqa6Gvc4P9hgmMHRjA37oJtDrik4SWQZq0VfxJ00oC3ByGmH6mR4AZY/BTPMnRAgF/m3rjryEd4hCjlQ1jJhdWmRTchruK7oSz/FrZmtCp/fVsRulcJ2JmU2gEQF0rfP9+eq/LWLD0FPvsp9ytf18wPMAY6fYj7vQGj5dx9ZB6oJ0RxlgAhPFhjAztjmMQD43oSsNnm9FKISS5Vv07tQDQBUHe3d,iv:Z6SfUFsjfRaVc23CNM1NE4/c92MLmbdEXilPJomX9qM=,tag:xknd9rqBVvUg69ICvhXHcA==,type:str] +nix-community-matrix-bot-token: ENC[AES256_GCM,data:uWqZe3tC5bGSC78SfVH0uwjnHBU0RdgglOlVUQo99AYykdzeAH9toTk2ZgtP97L1kKdyQo70f7QcNSCqtbmB0Q==,iv:xPob8PjdF0ha9u4lwFOWRJvSD+xUMIL6uw6OogMVP+g=,tag:gx0tIiiH6eNiapcEVZaiBw==,type:str] sops: kms: [] gcp_kms: [] @@ -59,8 +60,8 @@ sops: QnJZZzN1a1M5b1dwa3hvL3ZHYkpxQUkK1g9sQB0UHl9coaznjIn4WDpQv21Y8cl9 LNqnv0Q6KrxNliq2JEJoEpjD5+xTcqV/5FgylKhtdNWUZ0eAX8taog== -----END AGE ENCRYPTED FILE----- - lastmodified: "2023-07-29T05:26:34Z" - mac: ENC[AES256_GCM,data:HS8Jr5pHtANiytEOAYYja3b+FxyCb858pTFZvi1ZZ2NBkjRmkOY8UEzoL+dEJQ2RQ49l3GktIwu3oBwkjjoBHC7cqo5VfwB7a23u28iWwfiHduZMOOu4xHg6vsUCtScu1tr3bJexfVu47RHI/su/ds3UWk6eJKBm49MzcGTI7lU=,iv:J1lgkh9mSmd3iUf9pkvJAAsPgDZQsNtjMeBTwm+nhdQ=,tag:3IMkozS5d6jRoz+Gl8K0BA==,type:str] + lastmodified: "2023-08-12T01:00:57Z" + mac: ENC[AES256_GCM,data:GqsV7ULY9kWLooThcpJulxkJgxP92kY9OEWETuvO1Do9uiUaJUpmmPcwu7mhAEZzys4U+wZEfpRvfrrJanCotGCeNsfGh9zAbgHn4T5P8wqOyTWns5qaQDQki1Fs83CyGuCUjwPUWcH6euLYUlWANYcUbIZms9sb1l0bo7MxZ18=,iv:xqqpUsc5nfHmB+DX9S5fNPrdDqPBd+IPtlr3GnS/jfU=,tag:pPLlZ2q8ZxQJFHBxGTJRdA==,type:str] pgp: [] unencrypted_suffix: _unencrypted version: 3.7.3 diff --git a/modules/nixos/monitoring/alert-rules.nix b/modules/nixos/monitoring/alert-rules.nix new file mode 100644 index 0000000..0bfab98 --- /dev/null +++ b/modules/nixos/monitoring/alert-rules.nix @@ -0,0 +1,191 @@ +{ lib }: +lib.mapAttrsToList + (name: opts: { + alert = name; + expr = opts.condition; + for = opts.time or "2m"; + labels = { }; + annotations.description = opts.description; + # for matrix alert-receiver + annotations.summary = opts.description; + }) + ((lib.genAttrs [ + "borgbackup-job-github-org.service" + "borgbackup-job-nixpkgs-update.service" + ] + (name: { + condition = ''absent_over_time(task_last_run{name="${name}"}[1d])''; + description = "status of ${name} is unknown: no data for a day"; + }) + ) // { + prometheus_too_many_restarts = { + condition = 
''changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager|telegraf"}[15m]) > 2'';
+      description = "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping";
+    };
+
+    alert_manager_config_not_synced = {
+      condition = ''count(count_values("config_hash", alertmanager_config_hash)) > 1'';
+      description = "Configurations of AlertManager cluster instances are out of sync";
+    };
+
+    #alert_manager_e2e_dead_man_switch = {
+    #  condition = "vector(1)";
+    #  description = "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.";
+    #};
+
+    prometheus_not_connected_to_alertmanager = {
+      condition = "prometheus_notifications_alertmanagers_discovered < 1";
+      description = "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
+    };
+
+    prometheus_rule_evaluation_failures = {
+      condition = "increase(prometheus_rule_evaluation_failures_total[3m]) > 0";
+      description = "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
+    };
+
+    prometheus_template_expansion_failures = {
+      condition = "increase(prometheus_template_text_expansion_failures_total[3m]) > 0";
+      time = "0m";
+      description = "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
+    };
+
+    filesystem_full_80percent = {
+      condition = ''disk_used_percent{mode!="ro"} >= 80'';
+      time = "10m";
+      description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.path}} got less than 20% space left on its filesystem";
+    };
+
+    filesystem_inodes_full = {
+      condition = ''disk_inodes_free / disk_inodes_total < 0.10'';
+      time = "10m";
+      description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.path}} got less than 10% inodes left on its filesystem";
+    };
+
+    daily_task_not_run = {
+      # give 6 hours grace period
+      condition = ''time() - task_last_run{state="ok",frequency="daily"} > (24 + 6) * 60 * 60'';
+      description = "{{$labels.host}}: {{$labels.name}} was not run in the last 24h";
+    };
+
+    daily_task_failed = {
+      condition = ''task_last_run{state="fail"}'';
+      description = "{{$labels.host}}: {{$labels.name}} failed to run";
+    };
+
+    nixpkgs_out_of_date = {
+      condition = ''(time() - flake_input_last_modified{input="nixpkgs"}) / (60*60*24) > 7'';
+      description = "{{$labels.host}}: nixpkgs flake is older than a week";
+    };
+
+    swap_using_30percent = {
+      condition = ''mem_swap_total - (mem_swap_cached + mem_swap_free) > mem_swap_total * 0.3'';
+      time = "30m";
+      description = "{{$labels.host}} is using 30% of its swap space for at least 30 minutes";
+    };
+
+    # user@$uid.service and similar sometimes fail, we don't care about those services.
+    systemd_service_failed = {
+      condition = ''systemd_units_active_code{name!~"user@\\d+.service"} == 3'';
+      description = "{{$labels.host}} failed to (re)start service {{$labels.name}}";
+    };
+
+    ram_using_95percent = {
+      condition = "mem_buffered + mem_free + mem_cached < mem_total * 0.05";
+      time = "1h";
+      description = "{{$labels.host}} is using at least 95% of its RAM for at least 1 hour";
+    };
+
+    load15 = {
+      condition = ''system_load15 / system_n_cpus{instance!~"build.*.nix-community.org:9273"} >= 2.0'';
+      time = "10m";
+      description = "{{$labels.host}} is running with load15 / cores >= 2.0 for at least 10 minutes: {{$value}}";
+    };
+
+    reboot = {
+      condition = "system_uptime < 600";
+      description = "{{$labels.host}} just rebooted";
+    };
+
+    uptime = {
+      condition = ''system_uptime > (60*60*24*14)'';
+      description = "Uptime monster: {{$labels.host}} has been up for more than 14 days";
+    };
+
+    telegraf_down = {
+      condition = ''min(up{job=~"telegraf"}) by (source, job, instance, org) == 0'';
+      time = "3m";
+      description = "{{$labels.instance}}: {{$labels.job}} telegraf exporter from {{$labels.source}} is down";
+    };
+
+    http = {
+      condition = "http_response_result_code != 0";
+      description = "{{$labels.server}} : http request failed from {{$labels.instance}}: {{$labels.result}}";
+    };
+
+    http_match_failed = {
+      condition = "http_response_response_string_match == 0";
+      description = "{{$labels.server}} : http body not as expected; status code: {{$labels.status_code}}";
+    };
+
+    connection_failed = {
+      condition = "net_response_result_code != 0";
+      description = "{{$labels.server}}: connection to {{$labels.port}}({{$labels.protocol}}) failed from {{$labels.instance}}";
+    };
+
+    zfs_errors = {
+      condition = "zfs_arcstats_l2_io_error + zfs_dmu_tx_error + zfs_arcstats_l2_writes_error > 0";
+      description = "{{$labels.instance}} reports: {{$value}} ZFS IO errors";
+    };
+
+    zpool_status = {
+      condition = "zpool_status_errors > 0";
+      description = "{{$labels.instance}} reports: zpool {{$labels.name}} has {{$value}} errors";
+    };
+
+    mdraid_degraded_disks = {
+      condition = "mdstat_degraded_disks > 0";
+      description = "{{$labels.instance}}: raid {{$labels.dev}} has failed disks";
+    };
+
+    # ignore devices that have S.M.A.R.T disabled (e.g. if attached via USB)
+    # Also ignore build02, build03
+    smart_errors = {
+      condition = ''smart_device_health_ok{enabled!="Disabled", instance!~"(build02|build03).nix-community.org:9273"} != 1'';
+      description = "{{$labels.instance}}: S.M.A.R.T reports: {{$labels.device}} ({{$labels.model}}) has errors";
+    };
+
+    oom_kills = {
+      condition = "increase(kernel_vmstat_oom_kill[5m]) > 0";
+      description = "{{$labels.instance}}: OOM kill detected";
+    };
+
+    unusual_disk_read_latency = {
+      condition = "rate(diskio_read_time[1m]) / rate(diskio_reads[1m]) > 0.1 and rate(diskio_reads[1m]) > 0";
+      description = "{{$labels.instance}}: Disk latency is growing (read operations > 100ms)";
+    };
+
+    unusual_disk_write_latency = {
+      condition = "rate(diskio_write_time[1m]) / rate(diskio_writes[1m]) > 0.1 and rate(diskio_writes[1m]) > 0";
+      description = "{{$labels.instance}}: Disk latency is growing (write operations > 100ms)";
+    };
+
+    ipv6_dad_check = {
+      condition = "ipv6_dad_failures_count > 0";
+      description = "{{$labels.host}}: {{$value}} assigned ipv6 addresses have failed duplicate address check";
+    };
+
+    host_memory_under_memory_pressure = {
+      condition = "rate(node_vmstat_pgmajfault[1m]) > 1000";
+      description = "{{$labels.instance}}: The node is under heavy memory pressure. 
High rate of major page faults: {{$value}}"; + }; + + ext4_errors = { + condition = "ext4_errors_value > 0"; + description = "{{$labels.instance}}: ext4 has reported {{$value}} I/O errors: check /sys/fs/ext4/*/errors_count"; + }; + + alerts_silences_changed = { + condition = ''abs(delta(alertmanager_silences{state="active"}[1h])) >= 1''; + description = "alertmanager: number of active silences has changed: {{$value}}"; + }; + }) diff --git a/modules/nixos/monitoring/default.nix b/modules/nixos/monitoring/default.nix index 4dac878..8150c89 100644 --- a/modules/nixos/monitoring/default.nix +++ b/modules/nixos/monitoring/default.nix @@ -1,5 +1,6 @@ { imports = [ + ./matrix-alertmanager.nix ./prometheus.nix ./telegraf.nix ]; diff --git a/modules/nixos/monitoring/matrix-alertmanager.nix b/modules/nixos/monitoring/matrix-alertmanager.nix new file mode 100644 index 0000000..301bc17 --- /dev/null +++ b/modules/nixos/monitoring/matrix-alertmanager.nix @@ -0,0 +1,46 @@ +{ config, pkgs, ... }: +let + matrix-alertmanager-receiver = pkgs.buildGoModule rec { + pname = "matrix-alertmanager-receiver"; + version = "0.1.2"; + src = pkgs.fetchFromSourcehut { + owner = "~fnux"; + repo = "matrix-alertmanager-receiver"; + rev = version; + hash = "sha256-F6Cn0lmASAjWGEBCmyLdfz4r06fDTEfZQcynfA/RRtI="; + }; + vendorHash = "sha256-7tRCX9FzOsLXCTWWjLp3hr1kegt1dxsbCKfC7tICreo="; + }; +in +{ + sops.secrets.nix-community-matrix-bot-token = { }; + sops.templates."config.toml".content = '' + Homeserver = "https://matrix-client.matrix.org" + TargetRoomID = "!cBybDCkeRlSWfuaFvn:numtide.com" + MXID = "@nix-community-matrix-bot:matrix.org" + MXToken = "${config.sops.placeholder.nix-community-matrix-bot-token}" + HTTPPort = 9088 + HTTPAddress = "localhost" + ''; + sops.templates."config.toml".owner = "matrix-alertmanager-receiver"; + + users.users.matrix-alertmanager-receiver = { + isSystemUser = true; + group = "matrix-alertmanager-receiver"; + }; + users.groups.matrix-alertmanager-receiver = { }; + + systemd.services.matrix-alertmanager-receiver = { + description = "Matrix Alertmanager Receiver"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "simple"; + ExecStart = "${matrix-alertmanager-receiver}/bin/matrix-alertmanager-receiver -config ${config.sops.templates."config.toml".path}"; + Restart = "always"; + RestartSec = "10"; + User = "matrix-alertmanager-receiver"; + Group = "matrix-alertmanager-receiver"; + }; + }; +} diff --git a/modules/nixos/monitoring/prometheus.nix b/modules/nixos/monitoring/prometheus.nix index de4873e..a218fd4 100644 --- a/modules/nixos/monitoring/prometheus.nix +++ b/modules/nixos/monitoring/prometheus.nix @@ -1,6 +1,17 @@ +{ pkgs, ... 
}: { services.prometheus = { enable = true; + ruleFiles = [ + (pkgs.writeText "prometheus-rules.yml" (builtins.toJSON { + groups = [ + { + name = "alerting-rules"; + rules = import ./alert-rules.nix { inherit (pkgs) lib; }; + } + ]; + })) + ]; webExternalUrl = "https://prometheus.nix-community.org"; scrapeConfigs = [ { @@ -26,6 +37,15 @@ ]; } ]; + alertmanagers = [ + { + static_configs = [ + { + targets = [ "localhost:9093" ]; + } + ]; + } + ]; }; services.nginx.virtualHosts."prometheus.nix-community.org" = { @@ -33,4 +53,45 @@ forceSSL = true; locations."/".proxyPass = "http://localhost:9090"; }; + + services.prometheus.alertmanager = { + enable = true; + webExternalUrl = "https://alertmanager.nix-community.org"; + listenAddress = "[::1]"; + configuration = { + route = { + receiver = "default"; + routes = [ + { + group_by = [ "host" ]; + match_re.org = "nix-community"; + group_wait = "5m"; + group_interval = "5m"; + repeat_interval = "4h"; + receiver = "nix-community"; + } + ]; + }; + receivers = [ + { + name = "nix-community"; + webhook_configs = [ + { + url = "http://localhost:9088/alert"; + max_alerts = 5; + } + ]; + } + { + name = "default"; + } + ]; + }; + }; + + services.nginx.virtualHosts."alertmanager.nix-community.org" = { + enableACME = true; + forceSSL = true; + locations."/".proxyPass = "http://localhost:9093"; + }; } diff --git a/secrets.yaml b/secrets.yaml index a6fbd14..ef21266 100644 --- a/secrets.yaml +++ b/secrets.yaml @@ -11,6 +11,10 @@ nix-community-alert-bot: username: ENC[AES256_GCM,data:nA+jbVz3HtDgfDC+y2J4818=,iv:8Q7roobuWuXMnJXWKEJRjKlMYmvFUm5eodX5htLbVHY=,tag:r0FEOL9PByNYtL17hU7ApQ==,type:str] home-server: ENC[AES256_GCM,data:k6rsrEvyCUQj,iv:TJBl7dYioATGVYOOETIajkaw+7LKsyrJDI0zmey3Uo4=,tag:ZuSahyrPlmj2qGp8vvuyOw==,type:str] password: ENC[AES256_GCM,data:a0DcDpeUNDZpPxDiLzk3,iv:fXpMBTbp5tmjWlhe5EieyvQ4+tVbuowTf26CevOj0oc=,tag:7VIFyHbia6aR8WabLOz9TA==,type:str] +nix-community-matrix-bot: + username: ENC[AES256_GCM,data:p37MEk8kk2lOoBDgxY4tgXhOAHs3vRDY,iv:xvRX+k/V2HwAF3Df4oXtRf/XzS1rriJJPZ8z1B2/fLo=,tag:NNovFZGV8XFlP8pCC5mmrQ==,type:str] + home-server: ENC[AES256_GCM,data:5W8LDIlG8RThrQ==,iv:DjCo2cs0NoYBno1XuQ8cM9B1l6l6ZudDgOV15fZhxBo=,tag:wgYNxPXcpejteURF1IdXoQ==,type:str] + password: ENC[AES256_GCM,data:4q4RHDDzPfoK8cqbr6K6crheEdur5V1YiddVJP35939vmXP+Sg==,iv:R9dDa1K3UW1wByRLazNJTz1GdT5nhux+q39X/UrL0t4=,tag:Cwt6ffjNCiGGt4atBs3TgQ==,type:str] nix-infra-bot: URL: ENC[AES256_GCM,data:ZKphMNdc0TjZTA==,iv:ufiUj89KXkDjbsFiKs4hBky2n/fZ6Y1oafKrNMPC2k4=,tag:z1A18GPaf6gSKgi8JORNtw==,type:str] User: ENC[AES256_GCM,data:TK7VamXfCTELo38vZQ==,iv:koHFX5tWA4kEOvLdXy8WkFF5GQX6NZpo8p5aGUc3WWo=,tag:AJNpBhXiTxoQ+h3UTgB1GQ==,type:str] @@ -83,8 +87,8 @@ sops: MkcvL1JyVFBJV0Y5RFFCMGN1OUFXdU0Kdx1wy6ZOOTg1a6VKaq52SMBvC26lMsW/ oMP+hmXc2WtoqZp+jZ9rrXz6cZW6/dO7CPqxl3aUEKg6BkXIwgyKeg== -----END AGE ENCRYPTED FILE----- - lastmodified: "2023-07-18T06:28:59Z" - mac: ENC[AES256_GCM,data:HpdZCHgZmxvNtgeAFhnwz1gL+MuJWZJiZ75p3+heiNftAhiR1CSIWB1uAcAm0SVN5/mDOWs9SKH2IQv2clra4hkVnELtNjZVt576+OSq7Dy1LXvFEyafp8Sb6nJfMN3FgT+Mx/Y4YcGiLhllX2crkLBfiJu32n++Aejj5I0RxNA=,iv:qjXkgjB1PBB2cGPsI5pELlRONfaDL1cT3NzFPdmMQUU=,tag:BAF2WUF7v/Tp5IkEQ3oVZA==,type:str] + lastmodified: "2023-08-12T00:50:11Z" + mac: ENC[AES256_GCM,data:E1MYlrlHS2fsCapkIPXx1ljOTgXnAqHqvOvk40Aubd6sJqYwGPVsOuH4uLZHTIrpeZYqX7e8VHaboV1qwobcIIUpvSWJV2AJGHj4cJpL43JAktp7ANJqMV4NoGjPqfIcNLCi6hJjM0wJDvNLglkQ7TPUQUoxNyp9knuQuaQNcgY=,iv:s+BU1mtqx14WZawZUxvYj4tmNtLGleFxkvsvpm/LIww=,tag:90eIIWQ8WoYDEDLzlqOIQg==,type:str] pgp: [] unencrypted_suffix: 
_unencrypted
    version: 3.7.3
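
Note, not part of the patch: the rule list in alert-rules.nix is only evaluated at
build time, so a PromQL or annotation-template typo would otherwise only surface
once Prometheus reloads on the deployed host. Below is a minimal sketch of a
build-time check, assuming nixpkgs' prometheus package (which ships promtool) and
a hypothetical file name check-alert-rules.nix; only alert-rules.nix and the
writeText/toJSON pattern are taken from the patch itself.

{ pkgs ? import <nixpkgs> { } }:
let
  # Render the same rule file that prometheus.nix passes to ruleFiles.
  rules = pkgs.writeText "prometheus-rules.yml" (builtins.toJSON {
    groups = [
      {
        name = "alerting-rules";
        rules = import ./alert-rules.nix { inherit (pkgs) lib; };
      }
    ];
  });
in
# "promtool check rules" parses the file and validates expressions and
# annotation templates without starting a server; the build fails on error.
pkgs.runCommand "check-alert-rules" { nativeBuildInputs = [ pkgs.prometheus ]; } ''
  promtool check rules ${rules}
  touch $out
''

Running nix-build check-alert-rules.nix then fails fast if any rule in
alert-rules.nix does not parse.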