# NixOps configuration for the hosts running Prometheus on a Cardano node

{ config, pkgs, lib, ... }:

{
  imports = [
    ../secrets/cardano/grafana.nix
  ];
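
  # ../secrets/cardano/grafana.nix is assumed to define the NixOps deployment
  # key that places the Grafana admin password at /run/keys/grafana-apass,
  # referenced under services.grafana.security below.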

  services = {
    prometheus = {
      enable = true;
      webExternalUrl = "https://monitoring.mcwhirter.io/prometheus/";
      extraFlags = [
        "--storage.tsdb.retention.time=8760h" # Keep one year of metrics (8760 h = 24 h * 365 days)
      ];
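
      # Note: with --web.external-url set to a URL ending in /prometheus/ and
      # no explicit --web.route-prefix, Prometheus also serves its UI and API
      # under /prometheus/ locally, which is why the Grafana datasource URL and
      # the nginx proxy_pass below both include that path.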

      exporters = {
        node = {
          enable = true;
          openFirewall = true;
          enabledCollectors = [
            "systemd"
            "tcpstat"
            "conntrack"
            "diskstats"
            "entropy"
            "filefd"
            "filesystem"
            "loadavg"
            "meminfo"
            "netdev"
            "netstat"
            "stat"
            "time"
            "ntp"
            "timex"
            "vmstat"
            "logind"
            "interrupts"
            "ksmd"
            "processes"
          ];
        };
      };
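
      # The node exporter listens on TCP 9100 by default; openFirewall above
      # opens that port so the "node" scrape job below can reach it.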

      #alertmanager = {
      #  enable = true;
      #  webExternalUrl = "https://monitoring.mcwhirter.io/alertmanager/";
      #  configuration = [
      #  ];
      #};

      #alertmanagers = [ {
      #  scheme = "http";
      #  path_prefix = "/";
      #  static_configs = [ {
      #    targets = [ "airgead.mcwhirter.io:9093" ];
      #  } ];
      #} ];
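
      # The Alertmanager stanzas above are kept commented out, presumably for a
      # future deployment; the matching /alertmanager/ nginx location below is
      # commented out for the same reason.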

      rules = [ (builtins.toJSON {
        groups = [
          {
            name = "system";
            rules = [
              {
                alert = "node_down";
                expr = "up == 0";
                for = "5m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Node is down.";
                  description = "{{$labels.alias}} has been down for more than 5 minutes.";
                };
              }
              {
                alert = "node_systemd_service_failed";
                expr = "node_systemd_unit_state{state=\"failed\"} == 1";
                for = "4m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Service {{$labels.name}} failed to start.";
                  description = "{{$labels.alias}} failed to (re)start service {{$labels.name}}.";
                };
              }
              {
                alert = "node_filesystem_full_90percent";
                expr = "sort(node_filesystem_free_bytes{device!=\"ramfs\"} < node_filesystem_size_bytes{device!=\"ramfs\"} * 0.1) / 1024^3";
                for = "5m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Filesystem is running out of space soon.";
                  description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} has less than 10% space left on its filesystem.";
                };
              }
              {
                alert = "node_filesystem_full_in_4h";
                expr = "predict_linear(node_filesystem_free_bytes{device!=\"ramfs\",device!=\"tmpfs\",fstype!=\"autofs\",fstype!=\"cd9660\"}[4h], 4*3600) <= 0";
                for = "5m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Filesystem is running out of space in 4 hours.";
                  description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space in approx. 4 hours.";
                };
              }
              {
                alert = "node_filedescriptors_full_in_3h";
                expr = "predict_linear(node_filefd_allocated[1h], 3*3600) >= node_filefd_maximum";
                for = "20m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}} is running out of available file descriptors in 3 hours.";
                  description = "{{$labels.alias}} is running out of available file descriptors in approx. 3 hours.";
                };
              }
              {
                alert = "node_load1_90percent";
                expr = "node_load1 / on(alias) count(node_cpu_seconds_total{mode=\"system\"}) by (alias) >= 0.9";
                for = "1h";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Running on high load.";
                  description = "{{$labels.alias}} is running with > 90% total load for at least 1h.";
                };
              }
              {
                alert = "node_cpu_util_90percent";
                expr = "100 - (avg by (alias) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100) >= 90";
                for = "1h";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: High CPU utilization.";
                  description = "{{$labels.alias}} has total CPU utilization over 90% for at least 1h.";
                };
              }
              {
                alert = "node_ram_using_99percent";
                expr = "node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes < node_memory_MemTotal_bytes * 0.01";
                for = "30m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Using lots of RAM.";
                  description = "{{$labels.alias}} is using at least 99% of its RAM for at least 30 minutes now.";
                };
              }
              {
                alert = "node_swap_using_80percent";
                expr = "node_memory_SwapTotal_bytes - (node_memory_SwapFree_bytes + node_memory_SwapCached_bytes) > node_memory_SwapTotal_bytes * 0.8";
                for = "10m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Running out of swap soon.";
                  description = "{{$labels.alias}} is using more than 80% of its swap space for at least 10 minutes now.";
                };
              }
              {
                alert = "node_time_unsync";
                expr = "abs(node_timex_offset_seconds) > 0.050 or node_timex_sync_status != 1";
                for = "1m";
                labels = {
                  severity = "page";
                };
                annotations = {
                  summary = "{{$labels.alias}}: Clock out of sync with NTP.";
                  description = "{{$labels.alias}}: local clock offset is too large or the clock is out of sync with NTP.";
                };
              }
            ];
          }
        ];
      })];
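
      # builtins.toJSON renders the attribute set above as a JSON string; the
      # NixOS module writes the rules out to a rule file for Prometheus, which
      # accepts it because JSON is also valid YAML.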

      scrapeConfigs = [
        {
          job_name = "prometheus";
          scrape_interval = "5s";
          static_configs = [
            {
              targets = [
                "localhost:9090"
              ];
              labels = { alias = "prometheus"; };
            }
          ];
        }
        {
          job_name = "cardano-node";
          scrape_interval = "10s";
          static_configs = [
            {
              # cardano-node's own Prometheus metrics endpoint
              targets = [ "127.0.0.1:12798" ];
              labels = { alias = "airgead"; };
            }
          ];
        }
        {
          job_name = "node";
          scrape_interval = "10s";
          static_configs = [
            {
              targets = [
                "airgead.mcwhirter.io:9100"
              ];
              labels = {
                alias = "airgead.mcwhirter.io";
              };
            }
          ];
        }
      ];
    };

    grafana = {
      enable = true;
      addr = "0.0.0.0";
      domain = "monitoring.mcwhirter.io";
      rootUrl = "https://monitoring.mcwhirter.io/grafana";
      security = {
        adminPasswordFile = "/run/keys/grafana-apass"; # Where to find the password
      };
      auth = {
        anonymous = {
          enable = true; # Allow anonymous access
        };
      };
      provision = {
        enable = true;
        #dashboards = [
        #  {
        #    name = "Node System Dashboard";
        #    folder = "General";
        #    options.path = ../monitoring/NodeSystemDashboard.json;
        #  }
        #];
        datasources = [
          {
            type = "prometheus";
            name = "prometheus";
            url = "http://localhost:9090/prometheus";
          }
        ];
      };
    };

    nginx = {
      enable = true; # Enable Nginx
      recommendedGzipSettings = true;
      recommendedOptimisation = true;
      recommendedProxySettings = true;
      recommendedTlsSettings = true;
      virtualHosts."monitoring.mcwhirter.io" = { # Monitoring hostname
        enableACME = true; # Use ACME certs
        forceSSL = true; # Force SSL
        locations = {
          "/grafana/".proxyPass = "http://localhost:3000/"; # Proxy Grafana
          "/prometheus/".extraConfig = ''
            proxy_pass http://localhost:9090/prometheus/;
            proxy_set_header Host $host;
            proxy_set_header REMOTE_ADDR $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto https;
          '';
          #"/alertmanager/".proxyPass = "http://localhost:9093/"; # Proxy Alertmanager
        };
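
        # Note on the locations above: the /grafana/ proxyPass target ends in
        # "/", so nginx strips the /grafana/ prefix before forwarding and
        # Grafana itself serves from "/", while rootUrl keeps the links it
        # generates under /grafana.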
      };
    };
  };

  users.groups.keys.members = [ "grafana" ]; # Required due to NixOps issue #1204
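  # /run/keys is only traversable by root and the "keys" group, so the grafana
  # user joins that group in order to read the deployed grafana-apass key.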

  security.acme = {
    acceptTerms = true;
    certs = {
      "monitoring.mcwhirter.io".email = "craige@mcwhirter.io";
    };
  };
}