forked from the-distro/infra
parent 6a8f49f180
commit 84cfbdb050
@@ -1,11 +1,12 @@
The ForkOS automation rules gain a namespace: forkos header, the range selector [24h] is rewritten as the equivalent [1d], and the annotations are re-emitted with description before summary. The full file now reads:

namespace: forkos
groups:
- name: ForkOS automation
  rules:
  - alert: SyncFailedTooOften
    expr: 'changes(node_systemd_unit_state{name=~"ows.*.service",state="failed"}[1d]) > 2'
    for: 30m
    labels:
      severity: critical
    annotations:
      description: On {{ $labels.instance }}, the synchronization job has failed more than twice in the last 24 hours, check if there's a conflict or a stdenv change.
      summary: Synchronization job {{ $labels.name }} has failed more than twice in the last 24 hours
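For a quick sanity check, the expression can be evaluated ad hoc against any Prometheus-compatible query API before it ever fires as a rule. This is only a sketch: the endpoint URL below is an assumption, not something defined in this commit.

# Hypothetical query endpoint; substitute the real Prometheus/Mimir URL.
PROM_URL="http://localhost:9090"
# How many times each ows*.service unit entered the "failed" state in the
# last day; the rule alerts when this stays above 2 for 30 minutes.
curl -sG "$PROM_URL/api/v1/query" \
  --data-urlencode 'query=changes(node_systemd_unit_state{name=~"ows.*.service",state="failed"}[1d]) > 2'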

@@ -1,102 +1,119 @@
The PostgreSQL rules gain a namespace: postgres header, lose the for: 0m lines, and each description becomes a block scalar (listed before summary) instead of a one-line quoted string with \n escapes. The expressions are also normalized: sum (pg_locks_count) becomes sum(pg_locks_count), 0.20 becomes 0.2, and the bloat thresholds 100000000 and 200000000 are rendered as 1e+08 and 2e+08. The full file now reads:

namespace: postgres
groups:
- name: PostgreSQL
  rules:
  - alert: PostgresqlTableNotAutoVacuumed
    expr: '(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10'
    labels:
      severity: warning
    annotations:
      description: |-
        Table {{ $labels.relname }} has not been auto vacuumed for 10 days
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql table not auto vacuumed (instance {{ $labels.instance }})
  - alert: PostgresqlTableNotAutoAnalyzed
    expr: '(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10'
    labels:
      severity: warning
    annotations:
      description: |-
        Table {{ $labels.relname }} has not been auto analyzed for 10 days
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql table not auto analyzed (instance {{ $labels.instance }})
  - alert: PostgresqlDeadLocks
    expr: 'increase(pg_stat_database_deadlocks{datname!~"template.*|postgres"}[1m]) > 5'
    labels:
      severity: warning
    annotations:
      description: |-
        PostgreSQL has dead-locks
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql dead locks (instance {{ $labels.instance }})
  - alert: PostgresqlHighRollbackRate
    expr: 'sum by (namespace, datname) ((rate(pg_stat_database_xact_rollback{datid!="0",datname!~"template.*|postgres"}[3m])) / ((rate(pg_stat_database_xact_rollback{datid!="0",datname!~"template.*|postgres"}[3m])) + (rate(pg_stat_database_xact_commit{datid!="0",datname!~"template.*|postgres"}[3m])))) > 0.02'
    labels:
      severity: warning
    annotations:
      description: |-
        Ratio of transactions being aborted compared to committed is > 2 %
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql high rollback rate (instance {{ $labels.instance }})
  - alert: PostgresqlHighRateStatementTimeout
    expr: 'rate(postgresql_errors_total{type="statement_timeout"}[1m]) > 3'
    labels:
      severity: critical
    annotations:
      description: |-
        Postgres transactions showing high rate of statement timeouts
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql high rate statement timeout (instance {{ $labels.instance }})
  - alert: PostgresqlHighRateDeadlock
    expr: 'increase(postgresql_errors_total{type="deadlock_detected"}[1m]) > 1'
    labels:
      severity: critical
    annotations:
      description: |-
        Postgres detected deadlocks
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql high rate deadlock (instance {{ $labels.instance }})
  - alert: PostgresqlTooManyDeadTuples
    expr: '((pg_stat_user_tables_n_dead_tup > 10000) / (pg_stat_user_tables_n_live_tup + pg_stat_user_tables_n_dead_tup)) >= 0.1'
    for: 2m
    labels:
      severity: warning
    annotations:
      description: |-
        PostgreSQL dead tuples is too large
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql too many dead tuples (instance {{ $labels.instance }})
  - alert: PostgresqlTooManyLocksAcquired
    expr: '((sum(pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.2'
    for: 2m
    labels:
      severity: critical
    annotations:
      description: |-
        Too many locks acquired on the database. If this alert happens frequently, we may need to increase the postgres setting max_locks_per_transaction.
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql too many locks acquired (instance {{ $labels.instance }})
  - alert: PostgresqlBloatIndexHigh(>80%)
    expr: 'pg_bloat_btree_bloat_pct > 80 and on (idxname) (pg_bloat_btree_real_size > 1e+08)'
    for: 1h
    labels:
      severity: warning
    annotations:
      description: |-
        The index {{ $labels.idxname }} is bloated. You should execute `REINDEX INDEX CONCURRENTLY {{ $labels.idxname }};`
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql bloat index high (> 80%) (instance {{ $labels.instance }})
  - alert: PostgresqlBloatTableHigh(>80%)
    expr: 'pg_bloat_table_bloat_pct > 80 and on (relname) (pg_bloat_table_real_size > 2e+08)'
    for: 1h
    labels:
      severity: warning
    annotations:
      description: |-
        The table {{ $labels.relname }} is bloated. You should execute `VACUUM {{ $labels.relname }};`
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql bloat table high (> 80%) (instance {{ $labels.instance }})
  - alert: PostgresqlInvalidIndex
    expr: 'pg_genaral_index_info_pg_relation_size{indexrelname=~".*ccnew.*"}'
    for: 6h
    labels:
      severity: warning
    annotations:
      description: |-
        The table {{ $labels.relname }} has an invalid index: {{ $labels.indexrelname }}. You should execute `DROP INDEX {{ $labels.indexrelname }};`
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Postgresql invalid index (instance {{ $labels.instance }})
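The autovacuum and autoanalyze rules above mirror timestamps PostgreSQL itself keeps in the pg_stat_user_tables view. As an aside (direct psql access to the database is assumed here and is not part of this diff), a firing alert can be cross-checked straight on the server:

# List the least recently autovacuumed user tables; NULLs or timestamps older
# than 10 days correspond to the alert conditions above.
psql -c "SELECT relname, last_autovacuum, last_autoanalyze
         FROM pg_stat_user_tables
         ORDER BY last_autovacuum NULLS FIRST
         LIMIT 10;"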

@@ -1,76 +1,101 @@
The Host & hardware rules get the same treatment: a namespace: resources header, for: 0m dropped from HostCpuStealNoisyNeighbor and HostOomKillDetected, descriptions turned into block scalars listed before summary, and PromQL spacing normalized (on(instance) and ON (instance, device, mountpoint) become on (...), avg by(instance) becomes avg by (instance)). The full file now reads:

namespace: resources
groups:
- name: Host & hardware
  rules:
  - alert: HostOutOfMemory
    expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    for: 2m
    labels:
      severity: warning
    annotations:
      description: |-
        Node memory is filling up (< 10% left)
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host out of memory (instance {{ $labels.instance }})
  - alert: HostMemoryUnderMemoryPressure
    expr: (rate(node_vmstat_pgmajfault[1m]) > 1000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    for: 2m
    labels:
      severity: warning
    annotations:
      description: |-
        The node is under heavy memory pressure. High rate of major page faults
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host memory under memory pressure (instance {{ $labels.instance }})
  - alert: HostMemoryIsUnderutilized
    expr: (100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    for: 1w
    labels:
      severity: info
    annotations:
      description: |-
        Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host Memory is underutilized (instance {{ $labels.instance }})
  - alert: HostOutOfDiskSpace
    expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    for: 2m
    labels:
      severity: warning
    annotations:
      description: |-
        Disk is almost full (< 10% left)
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host out of disk space (instance {{ $labels.instance }})
  - alert: HostDiskWillFillIn24Hours
    expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    for: 2m
    labels:
      severity: warning
    annotations:
      description: |-
        Filesystem is predicted to run out of space within the next 24 hours at current write rate
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
  - alert: HostCpuIsUnderutilized
    expr: (100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    for: 1w
    labels:
      severity: info
    annotations:
      description: |-
        CPU load is < 20% for 1 week. Consider reducing the number of CPUs.
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host CPU is underutilized (instance {{ $labels.instance }})
  - alert: HostCpuStealNoisyNeighbor
    expr: (avg by (instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    labels:
      severity: warning
    annotations:
      description: |-
        CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
  - alert: HostOomKillDetected
    expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    labels:
      severity: warning
    annotations:
      description: |-
        OOM kill detected
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host OOM kill detected (instance {{ $labels.instance }})
  - alert: HostNetworkInterfaceSaturated
    expr: ((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
    for: 1m
    labels:
      severity: warning
    annotations:
      description: |-
        The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded.
          VALUE = {{ $value }}
          LABELS = {{ $labels }}
      summary: Host Network Interface Saturated (instance {{ $labels.instance }})

The Prometheus/Mimir module now builds the ruler directory through a derivation that checks and lints the rule files with mimirtool and fails if linting would change anything; this is why the rule files above are committed in their normalized form.

@@ -9,6 +9,18 @@ let
   inherit (lib) mkEnableOption mkIf;

   mimirPort = config.services.mimir.configuration.server.http_listen_port;
+
+  alerts = pkgs.runCommand "mimir-alerts-checked" {
+    src = ./alerts;
+    nativeBuildInputs = with pkgs; [ mimir ];
+  } ''
+    mkdir $out
+    cp -R $src $out/anonymous/
+    chmod -R +w $out
+    mimirtool rules check --rule-dirs=$out/anonymous
+    mimirtool rules lint --rule-dirs=$out/anonymous
+    diff -r $src $out/anonymous
+  '';
 in
 {
   options.bagel.services.prometheus.enable = mkEnableOption "Prometheus scraper";

@@ -60,10 +72,7 @@ in
       blocks_storage.backend = "s3";
       ruler_storage = {
         backend = "local";
-        local.directory = pkgs.runCommand "mimir-rules" {} ''
-          mkdir -p $out
-          ln -s ${./alerts} $out/anonymous
-        '';
+        local.directory = alerts;
       };

       alertmanager = {
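The same validation the derivation performs can be run by hand from a checkout. This is a sketch under two assumptions: the ./alerts directory sits next to this module, and mimirtool (shipped with the mimir package) is on PATH; the commands themselves are taken from the derivation above.

# Copy the rules somewhere writable, then check and lint them; the final
# diff fails if linting would rewrite any file, i.e. if a rule file is not
# already committed in its normalized form.
tmp=$(mktemp -d)
cp -R ./alerts "$tmp/anonymous"
chmod -R +w "$tmp"
mimirtool rules check --rule-dirs="$tmp/anonymous"
mimirtool rules lint --rule-dirs="$tmp/anonymous"
diff -r ./alerts "$tmp/anonymous"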