diff --git a/services/monitoring/lgtm/alerts/forkos.yaml b/services/monitoring/lgtm/alerts/forkos.yaml
index d48318a..e6dc3f6 100644
--- a/services/monitoring/lgtm/alerts/forkos.yaml
+++ b/services/monitoring/lgtm/alerts/forkos.yaml
@@ -1,11 +1,12 @@
+namespace: forkos
 groups:
-  - name: ForkOS automation
-    rules:
-      - alert: SyncFailedTooOften
-        expr: 'changes(node_systemd_unit_state{name=~"ows.*.service",state="failed"}[24h]) > 2'
-        for: 30m
-        labels:
-          severity: critical
-        annotations:
-          summary: "Synchronization job {{ $labels.name }} has failed more than twice in the last 24 hours"
-          description: "On {{ $labels.instance }}, the synchronization job has failed more than twice in the last 24 hours, check if there's a conflict or a stdenv change."
+    - name: ForkOS automation
+      rules:
+        - alert: SyncFailedTooOften
+          expr: 'changes(node_systemd_unit_state{name=~"ows.*.service",state="failed"}[1d]) > 2'
+          for: 30m
+          labels:
+            severity: critical
+          annotations:
+            description: On {{ $labels.instance }}, the synchronization job has failed more than twice in the last 24 hours; check if there's a conflict or a stdenv change.
+            summary: Synchronization job {{ $labels.name }} has failed more than twice in the last 24 hours
diff --git a/services/monitoring/lgtm/alerts/postgres.yml b/services/monitoring/lgtm/alerts/postgres.yml
index efa3b4a..c9a229e 100644
--- a/services/monitoring/lgtm/alerts/postgres.yml
+++ b/services/monitoring/lgtm/alerts/postgres.yml
@@ -1,102 +1,119 @@
+namespace: postgres
 groups:
-- name: PostgreSQL
-  rules:
-
-  - alert: PostgresqlTableNotAutoVacuumed
-    expr: '(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10'
-    for: 0m
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql table not auto vacuumed (instance {{ $labels.instance }})
-      description: "Table {{ $labels.relname }} has not been auto vacuumed for 10 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlTableNotAutoAnalyzed
-    expr: '(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10'
-    for: 0m
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql table not auto analyzed (instance {{ $labels.instance }})
-      description: "Table {{ $labels.relname }} has not been auto analyzed for 10 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlDeadLocks
-    expr: 'increase(pg_stat_database_deadlocks{datname!~"template.*|postgres"}[1m]) > 5'
-    for: 0m
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql dead locks (instance {{ $labels.instance }})
-      description: "PostgreSQL has dead-locks\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlHighRollbackRate
-    expr: 'sum by (namespace,datname) ((rate(pg_stat_database_xact_rollback{datname!~"template.*|postgres",datid!="0"}[3m])) / ((rate(pg_stat_database_xact_rollback{datname!~"template.*|postgres",datid!="0"}[3m])) + (rate(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[3m])))) > 0.02'
-    for: 0m
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql high rollback rate (instance {{ $labels.instance }})
-      description: "Ratio of transactions being aborted compared to committed is > 2 %\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlHighRateStatementTimeout
-    expr: 'rate(postgresql_errors_total{type="statement_timeout"}[1m]) > 3'
-    for: 0m
-    labels:
-      severity: critical
-    annotations:
-      summary: Postgresql high rate statement timeout (instance {{ $labels.instance }})
-      description: "Postgres transactions showing high rate of statement timeouts\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlHighRateDeadlock
-    expr: 'increase(postgresql_errors_total{type="deadlock_detected"}[1m]) > 1'
-    for: 0m
-    labels:
-      severity: critical
-    annotations:
-      summary: Postgresql high rate deadlock (instance {{ $labels.instance }})
-      description: "Postgres detected deadlocks\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlTooManyDeadTuples
-    expr: '((pg_stat_user_tables_n_dead_tup > 10000) / (pg_stat_user_tables_n_live_tup + pg_stat_user_tables_n_dead_tup)) >= 0.1'
-    for: 2m
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql too many dead tuples (instance {{ $labels.instance }})
-      description: "PostgreSQL dead tuples is too large\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlTooManyLocksAcquired
-    expr: '((sum (pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.20'
-    for: 2m
-    labels:
-      severity: critical
-    annotations:
-      summary: Postgresql too many locks acquired (instance {{ $labels.instance }})
-      description: "Too many locks acquired on the database. If this alert happens frequently, we may need to increase the postgres setting max_locks_per_transaction.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlBloatIndexHigh(>80%)
-    expr: 'pg_bloat_btree_bloat_pct > 80 and on (idxname) (pg_bloat_btree_real_size > 100000000)'
-    for: 1h
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql bloat index high (> 80%) (instance {{ $labels.instance }})
-      description: "The index {{ $labels.idxname }} is bloated. You should execute `REINDEX INDEX CONCURRENTLY {{ $labels.idxname }};`\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlBloatTableHigh(>80%)
-    expr: 'pg_bloat_table_bloat_pct > 80 and on (relname) (pg_bloat_table_real_size > 200000000)'
-    for: 1h
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql bloat table high (> 80%) (instance {{ $labels.instance }})
-      description: "The table {{ $labels.relname }} is bloated. You should execute `VACUUM {{ $labels.relname }};`\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-  - alert: PostgresqlInvalidIndex
-    expr: 'pg_genaral_index_info_pg_relation_size{indexrelname=~".*ccnew.*"}'
-    for: 6h
-    labels:
-      severity: warning
-    annotations:
-      summary: Postgresql invalid index (instance {{ $labels.instance }})
-      description: "The table {{ $labels.relname }} has an invalid index: {{ $labels.indexrelname }}. You should execute `DROP INDEX {{ $labels.indexrelname }};`\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+    - name: PostgreSQL
+      rules:
+        - alert: PostgresqlTableNotAutoVacuumed
+          expr: '(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10'
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                Table {{ $labels.relname }} has not been auto vacuumed for 10 days
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql table not auto vacuumed (instance {{ $labels.instance }})
+        - alert: PostgresqlTableNotAutoAnalyzed
+          expr: '(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10'
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                Table {{ $labels.relname }} has not been auto analyzed for 10 days
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql table not auto analyzed (instance {{ $labels.instance }})
+        - alert: PostgresqlDeadLocks
+          expr: 'increase(pg_stat_database_deadlocks{datname!~"template.*|postgres"}[1m]) > 5'
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                PostgreSQL has deadlocks
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql dead locks (instance {{ $labels.instance }})
+        - alert: PostgresqlHighRollbackRate
+          expr: 'sum by (namespace, datname) ((rate(pg_stat_database_xact_rollback{datid!="0",datname!~"template.*|postgres"}[3m])) / ((rate(pg_stat_database_xact_rollback{datid!="0",datname!~"template.*|postgres"}[3m])) + (rate(pg_stat_database_xact_commit{datid!="0",datname!~"template.*|postgres"}[3m])))) > 0.02'
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                Ratio of transactions being aborted compared to committed is > 2%
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql high rollback rate (instance {{ $labels.instance }})
+        - alert: PostgresqlHighRateStatementTimeout
+          expr: 'rate(postgresql_errors_total{type="statement_timeout"}[1m]) > 3'
+          labels:
+            severity: critical
+          annotations:
+            description: |-
+                Postgres transactions are showing a high rate of statement timeouts
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql high rate statement timeout (instance {{ $labels.instance }})
+        - alert: PostgresqlHighRateDeadlock
+          expr: 'increase(postgresql_errors_total{type="deadlock_detected"}[1m]) > 1'
+          labels:
+            severity: critical
+          annotations:
+            description: |-
+                Postgres detected deadlocks
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql high rate deadlock (instance {{ $labels.instance }})
+        - alert: PostgresqlTooManyDeadTuples
+          expr: '((pg_stat_user_tables_n_dead_tup > 10000) / (pg_stat_user_tables_n_live_tup + pg_stat_user_tables_n_dead_tup)) >= 0.1'
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                The ratio of dead tuples in PostgreSQL is too large
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql too many dead tuples (instance {{ $labels.instance }})
+        - alert: PostgresqlTooManyLocksAcquired
+          expr: '((sum(pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.2'
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            description: |-
+                Too many locks acquired on the database. If this alert happens frequently, we may need to increase the postgres setting max_locks_per_transaction.
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql too many locks acquired (instance {{ $labels.instance }})
+        - alert: PostgresqlBloatIndexHigh(>80%)
+          expr: 'pg_bloat_btree_bloat_pct > 80 and on (idxname) (pg_bloat_btree_real_size > 1e+08)'
+          for: 1h
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                The index {{ $labels.idxname }} is bloated. You should execute `REINDEX INDEX CONCURRENTLY {{ $labels.idxname }};`
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql bloat index high (> 80%) (instance {{ $labels.instance }})
+        - alert: PostgresqlBloatTableHigh(>80%)
+          expr: 'pg_bloat_table_bloat_pct > 80 and on (relname) (pg_bloat_table_real_size > 2e+08)'
+          for: 1h
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                The table {{ $labels.relname }} is bloated. You should execute `VACUUM {{ $labels.relname }};`
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql bloat table high (> 80%) (instance {{ $labels.instance }})
+        - alert: PostgresqlInvalidIndex
+          expr: 'pg_genaral_index_info_pg_relation_size{indexrelname=~".*ccnew.*"}'
+          for: 6h
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                The table {{ $labels.relname }} has an invalid index: {{ $labels.indexrelname }}. You should execute `DROP INDEX {{ $labels.indexrelname }};`
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Postgresql invalid index (instance {{ $labels.instance }})
diff --git a/services/monitoring/lgtm/alerts/resources.yaml b/services/monitoring/lgtm/alerts/resources.yaml
index fecf41f..84b52cf 100644
--- a/services/monitoring/lgtm/alerts/resources.yaml
+++ b/services/monitoring/lgtm/alerts/resources.yaml
@@ -1,76 +1,101 @@
+namespace: resources
 groups:
-  - name: Host & hardware
-    rules:
-      - alert: HostOutOfMemory
-        expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 2m
-        labels:
-          severity: warning
-        annotations:
-          summary: Host out of memory (instance {{ $labels.instance }})
-          description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostMemoryUnderMemoryPressure
-        expr: (rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 2m
-        labels:
-          severity: warning
-        annotations:
-          summary: Host memory under memory pressure (instance {{ $labels.instance }})
-          description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostMemoryIsUnderutilized
-        expr: (100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 1w
-        labels:
-          severity: info
-        annotations:
-          summary: Host Memory is underutilized (instance {{ $labels.instance }})
-          description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostOutOfDiskSpace
-        expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 2m
-        labels:
-          severity: warning
-        annotations:
-          summary: Host out of disk space (instance {{ $labels.instance }})
-          description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostDiskWillFillIn24Hours
-        expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 2m
-        labels:
-          severity: warning
-        annotations:
-          summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
-          description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostCpuIsUnderutilized
-        expr: (100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 1w
-        labels:
-          severity: info
-        annotations:
-          summary: Host CPU is underutilized (instance {{ $labels.instance }})
-          description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostCpuStealNoisyNeighbor
-        expr: (avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 0m
-        labels:
-          severity: warning
-        annotations:
-          summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
-          description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostOomKillDetected
-        expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 0m
-        labels:
-          severity: warning
-        annotations:
-          summary: Host OOM kill detected (instance {{ $labels.instance }})
-          description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-      - alert: HostNetworkInterfaceSaturated
-        expr: ((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
-        for: 1m
-        labels:
-          severity: warning
-        annotations:
-          summary: Host Network Interface Saturated (instance {{ $labels.instance }})
-          description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
+    - name: Host & hardware
+      rules:
+        - alert: HostOutOfMemory
+          expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                Node memory is filling up (< 10% left)
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host out of memory (instance {{ $labels.instance }})
+        - alert: HostMemoryUnderMemoryPressure
+          expr: (rate(node_vmstat_pgmajfault[1m]) > 1000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                The node is under heavy memory pressure. High rate of major page faults
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host memory under memory pressure (instance {{ $labels.instance }})
+        - alert: HostMemoryIsUnderutilized
+          expr: (100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          for: 1w
+          labels:
+            severity: info
+          annotations:
+            description: |-
+                Node memory usage is < 20% for 1 week. Consider reducing the allocated memory. (instance {{ $labels.instance }})
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host Memory is underutilized (instance {{ $labels.instance }})
+        - alert: HostOutOfDiskSpace
+          expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                Disk is almost full (< 10% left)
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host out of disk space (instance {{ $labels.instance }})
+        - alert: HostDiskWillFillIn24Hours
+          expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                Filesystem is predicted to run out of space within the next 24 hours at current write rate
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
+        - alert: HostCpuIsUnderutilized
+          expr: (100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          for: 1w
+          labels:
+            severity: info
+          annotations:
+            description: |-
+                CPU load is < 20% for 1 week. Consider reducing the number of CPUs.
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host CPU is underutilized (instance {{ $labels.instance }})
+        - alert: HostCpuStealNoisyNeighbor
+          expr: (avg by (instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                CPU steal is > 10%. A noisy neighbor is killing VM performance or a spot instance may be out of credit.
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
+        - alert: HostOomKillDetected
+          expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                OOM kill detected
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host OOM kill detected (instance {{ $labels.instance }})
+        - alert: HostNetworkInterfaceSaturated
+          expr: ((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
+          for: 1m
+          labels:
+            severity: warning
+          annotations:
+            description: |-
+                The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded.
+                VALUE = {{ $value }}
+                LABELS = {{ $labels }}
+            summary: Host Network Interface Saturated (instance {{ $labels.instance }})
diff --git a/services/monitoring/lgtm/mimir.nix b/services/monitoring/lgtm/mimir.nix
index 3c771c5..782832f 100644
--- a/services/monitoring/lgtm/mimir.nix
+++ b/services/monitoring/lgtm/mimir.nix
@@ -9,6 +9,18 @@ let
   inherit (lib) mkEnableOption mkIf;
 
   mimirPort = config.services.mimir.configuration.server.http_listen_port;
+
+  alerts = pkgs.runCommand "mimir-alerts-checked" {
+    src = ./alerts;
+    nativeBuildInputs = with pkgs; [ mimir ];
+  } ''
+    mkdir $out
+    cp -R $src $out/anonymous/
+    chmod -R +w $out
+    mimirtool rules check --rule-dirs=$out/anonymous
+    mimirtool rules lint --rule-dirs=$out/anonymous
+    diff -r $src $out/anonymous
+  '';
 in
 {
   options.bagel.services.prometheus.enable = mkEnableOption "Prometheus scraper";
@@ -60,10 +72,7 @@ in
       blocks_storage.backend = "s3";
       ruler_storage = {
         backend = "local";
-        local.directory = pkgs.runCommand "mimir-rules" {} ''
-          mkdir -p $out
-          ln -s ${./alerts} $out/anonymous
-        '';
+        local.directory = alerts;
      };
 
       alertmanager = {