Enable Mimir Alertmanager, add example alert #33

Merged
k900 merged 6 commits from alertmanager into main 2024-07-08 06:35:33 +00:00
20 changed files with 222 additions and 208 deletions

View file

@ -40,7 +40,6 @@
hydra.enable = true; hydra.enable = true;
hydra.dbi = "dbi:Pg:dbname=hydra;user=hydra"; hydra.dbi = "dbi:Pg:dbname=hydra;user=hydra";
}; };
bagel.meta.monitoring.address = "bagel-box.infra.forkos.org";
security.acme.acceptTerms = true; security.acme.acceptTerms = true;
security.acme.defaults.email = "infra@forkos.org"; security.acme.defaults.email = "infra@forkos.org";

View file

@ -24,7 +24,6 @@
}; };
}; };
}; };
bagel.meta.monitoring.address = "gerrit01.infra.forkos.org";
fileSystems."/gerrit-data" = { fileSystems."/gerrit-data" = {
device = "/dev/disk/by-uuid/d1062305-0dea-4740-9a27-b6b1691862a4"; device = "/dev/disk/by-uuid/d1062305-0dea-4740-9a27-b6b1691862a4";

View file

@ -24,8 +24,6 @@
}; };
}; };
bagel.meta.monitoring.address = "fodwatch.infra.forkos.org";
i18n.defaultLocale = "en_US.UTF-8"; i18n.defaultLocale = "en_US.UTF-8";
system.stateVersion = "24.05"; system.stateVersion = "24.05";

View file

@ -21,7 +21,6 @@
enable = true; enable = true;
domain = "netbox.forkos.org"; domain = "netbox.forkos.org";
}; };
bagel.meta.monitoring.address = "meta01.infra.forkos.org";
bagel.services.prometheus.enable = true; bagel.services.prometheus.enable = true;
bagel.services.loki.enable = true; bagel.services.loki.enable = true;
bagel.services.grafana.enable = true; bagel.services.grafana.enable = true;

View file

@ -0,0 +1,105 @@
{
config,
lib,
...
}:
let
cfg = config.bagel.monitoring.grafana-agent;
inherit (lib) mkEnableOption mkOption mkIf types;
passwordAsCredential = "\${CREDENTIALS_DIRECTORY}/password";
in
{
options.bagel.monitoring.grafana-agent = {
enable = (mkEnableOption "Grafana Agent") // { default = true; };
exporters = mkOption {
description = ''
Set of additional exporters to scrape.
The attribute name will be used as `job_name`
internally, which ends up exported as `job` label
on all metrics of that exporter.
'';
type = types.attrsOf (types.submodule {
options.port = mkOption {
description = "Exporter port";
type = types.int;
};
});
default = {};
};
};
config = mkIf cfg.enable {
age.secrets.grafana-agent-password.file = ../../secrets/metrics-push-password.age;
services.grafana-agent = {
enable = true;
credentials.password = config.age.secrets.grafana-agent-password.path;
settings = {
metrics = {
global.remote_write = [
{
url = "https://mimir.forkos.org/api/v1/push";
basic_auth = {
username = "promtail";
password_file = passwordAsCredential;
};
}
];

job_name is exposed as job in the resulting metrics.

I get that the previous version -- the pull based metrics collection using prometheus -- had one job per machine as well.

And I might be missing something. But I do feel like we can do with multiple jobs here. One job for each exporter, with the job_name set to the exporter name.

This allows one to use the metrics browser in Grafana to list all metrics of /one/ exporter by simply

  1. selecting "job" in "2. Select label to search in"
  2. and then in "3. Select (multiple) values for your labels" your job name (exporter name)

I find that helpful when wondering which metrics are actually exported and available.

And if you want all metrics of a single instance, you can throw together a simple regex with the instance label.
Regex, because instance contains both the hostname and a port.

Or add another static label, e.g. hostname, similar to what you do down below in the logs section with host.

Let me know what you think. I can do the implementation if you want.
config.bagel.monitoring.grafana-agent.exporters would need to become an attrset and all.

And the hostname thingy is as simple as

diff --git a/services/monitoring/agent.nix b/services/monitoring/agent.nix
index e538cb7..e52ea98 100644
--- a/services/monitoring/agent.nix
+++ b/services/monitoring/agent.nix
@@ -48,7 +48,10 @@ in
                 {
                   job_name = config.networking.hostName;
                   static_configs = [
-                    { targets = map (e: "localhost:" + (toString e.port)) config.bagel.monitoring.grafana-agent.exporters; }
+                    {
+                      targets = map (e: "localhost:" + (toString e.port)) config.bagel.monitoring.grafana-agent.exporters;
+                      labels.hostname = config.networking.hostName;
+                    }
                   ];
                 }
               ];
`job_name` is exposed as `job` in the resulting metrics. I get that the previous version -- the pull based metrics collection using prometheus -- had one job per machine as well. And I might be missing something. But I do feel like we can do with multiple jobs here. One job for each exporter, with the `job_name` set to the exporter name. This allows one to use the metrics browser in Grafana to list all metrics of /one/ exporter by simply 1. selecting "job" in "2. Select label to search in" 1. and then in "3. Select (multiple) values for your labels" your job name (exporter name) I find that helpful when wondering which metrics are actually exported and available. And if you want all metrics of a single instance, you can throw together a simple regex with the `instance` label. Regex, because `instance` contains both the hostname and a port. Or add another static label, e.g. `hostname`, similar to what you do down below in the logs section with `host`. Let me know what you think. I can do the implementation if you want. `config.bagel.monitoring.grafana-agent.exporters` would need to become an attrset and all. And the `hostname` thingy is as simple as ```diff diff --git a/services/monitoring/agent.nix b/services/monitoring/agent.nix index e538cb7..e52ea98 100644 --- a/services/monitoring/agent.nix +++ b/services/monitoring/agent.nix @@ -48,7 +48,10 @@ in { job_name = config.networking.hostName; static_configs = [ - { targets = map (e: "localhost:" + (toString e.port)) config.bagel.monitoring.grafana-agent.exporters; } + { + targets = map (e: "localhost:" + (toString e.port)) config.bagel.monitoring.grafana-agent.exporters; + labels.hostname = config.networking.hostName;; + } ]; } ]; ```
Outdated
Review

Yeah, I can do that. I've never really bothered beyond the one job per machine thing but this can also work and if it makes life easier for people, I don't care either way.

Yeah, I can do that. I've never really bothered beyond the one job per machine thing but this can also work and if it makes life easier for people, I don't care either way.
# Attach the machine's hostname to every pushed series so instances can be
# filtered without regex-matching the host:port `instance` label.
global.external_labels.hostname = config.networking.hostName;
configs = [
{
name = config.networking.hostName;
# One scrape job per exporter: `job_name` surfaces as the `job` label,
# so each exporter's metrics are listable on their own in Grafana.
scrape_configs = lib.mapAttrsToList (name: value: {
job_name = name;
static_configs = [
# `targets` must be a LIST of "host:port" strings; a bare string here
# serializes to an invalid scrape config, so wrap it in a singleton list.
{ targets = [ "localhost:${toString value.port}" ]; }
];
}) config.bagel.monitoring.grafana-agent.exporters;
}
];
};
logs = {
global.clients = [
{
url = "https://loki.forkos.org/loki/api/v1/push";
basic_auth = {
username = "promtail";
password_file = passwordAsCredential;
};
}
];
configs = [
{
name = "journald";
scrape_configs = [
{
job_name = "system";
journal = {
max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
};
};
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
];
}
];
}
];
positions_directory = "\${STATE_DIRECTORY}/positions";
};
emilylange marked this conversation as resolved Outdated
Outdated
Review

I actually did this intentionally, but I can add all the exporters manually too.

I actually did this intentionally, but I can add all the exporters manually too.

Whoops I misread the previous services.prometheus.exporters.node.enabledCollectors that this is meant to replace.
Sorry for that. Will drop my commit. One sec.

Whoops I misread the previous `services.prometheus.exporters.node.enabledCollectors` that this is meant to replace. Sorry for that. Will drop my commit. One sec.
integrations.node_exporter.enable_collectors = [
"processes"
"systemd"
];
};
};
};
}

View file

@ -2,6 +2,6 @@
imports = [ imports = [
./exporters ./exporters
./lgtm ./lgtm
./promtail.nix ./agent.nix
]; ];
} }

View file

@ -17,6 +17,6 @@ in
listenAddress = "0.0.0.0"; listenAddress = "0.0.0.0";
}; };
bagel.meta.monitoring.exporters = [ { port = 9102; } ]; bagel.monitoring.grafana-agent.exporters.cadvisor.port = 9102;
}; };
} }

View file

@ -1,37 +1,7 @@
{
config,
lib,
...
}:
let
inherit (lib) mkOption types;
in
{ {
imports = [ imports = [
./cadvisor.nix ./cadvisor.nix
./node.nix
./nginx.nix ./nginx.nix
./postgres.nix ./postgres.nix
]; ];
options.bagel = {
meta.monitoring = {
address = mkOption {
description = "Node's public address";
type = types.str;
};
exporters = mkOption {
description = "List of all exporters to scrape";
type = types.listOf (types.submodule {
options.port = mkOption {
description = "Exporter port";
type = types.int;
};
});
default = [];
};
};
};
config.networking.firewall.allowedTCPPorts = map (e: e.port) config.bagel.meta.monitoring.exporters;
} }

View file

@ -30,8 +30,6 @@ in
]; ];
}; };
bagel.meta.monitoring.exporters = [ bagel.monitoring.grafana-agent.exporters.nginxlog.port = 9103;
{ port = 9103; }
];
}; };
} }

View file

@ -1,25 +0,0 @@
# (Deleted by this PR.) NixOS module enabling the standard Prometheus
# node_exporter on every host by default; replaced by the Grafana Agent's
# built-in node_exporter integration (see agent.nix in this PR).
{
config,
lib,
...
}:
let
# Shorthand for this module's own option subtree.
cfg = config.bagel.monitoring.exporters.node;
inherit (lib) mkEnableOption mkIf;
in
{
# Default-on: every machine exports node metrics unless explicitly disabled.
options.bagel.monitoring.exporters.node.enable = (mkEnableOption "Standard node_exporter") // { default = true; };
config = mkIf cfg.enable {
services.prometheus.exporters.node = {
enable = true;
# Collectors enabled on top of the exporter's defaults.
enabledCollectors = [
"processes"
"systemd"
];
port = 9101;
};
# Register this exporter in the fleet-wide scrape list consumed by the
# (also deleted) central Prometheus module.
bagel.meta.monitoring.exporters = [ { port = 9101; } ];
};
}

View file

@ -24,8 +24,6 @@ in
services.postgresql.settings.shared_preload_libraries = "pg_stat_statements"; services.postgresql.settings.shared_preload_libraries = "pg_stat_statements";
bagel.meta.monitoring.exporters = [ bagel.monitoring.grafana-agent.exporters.postgres.port = 9104;
{ port = 9104; }
];
}; };
} }

View file

@ -0,0 +1,5 @@
# Mimir ruler rule group (loaded via ruler_storage.local in mimir.nix).
# `expr: 1` is constant-true, so this alert fires permanently — a smoke
# test proving the ruler -> Alertmanager pipeline works end to end.
groups:
- name: Demo alerts
rules:
- alert: Demo alert
expr: 1

View file

@ -2,6 +2,6 @@
imports = [ imports = [
./grafana.nix ./grafana.nix
./loki.nix ./loki.nix
./prometheus.nix ./mimir.nix
]; ];
} }

View file

@ -92,6 +92,7 @@ in
uid = "mimir"; uid = "mimir";
access = "proxy"; access = "proxy";
url = "http://127.0.0.1:9009/prometheus"; url = "http://127.0.0.1:9009/prometheus";
isDefault = true;
} }
{ {
name = "Loki"; name = "Loki";
@ -100,6 +101,17 @@ in
access = "proxy"; access = "proxy";
url = "http://127.0.0.1:9090/"; url = "http://127.0.0.1:9090/";
} }
{
name = "Mimir Alertmanager";
type = "alertmanager";
uid = "mimir-alertmanager";
access = "proxy";
url = "http://127.0.0.1:9009/";
jsonData = {
handleGrafanaManagedAlerts = true;
implementation = "mimir";
};
}
]; ];
}; };
}; };

View file

@ -12,8 +12,8 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
age.secrets = { age.secrets = {
loki-htpasswd = { metrics-push-htpasswd = {
file = ../../../secrets/loki-htpasswd.age; file = ../../../secrets/metrics-push-htpasswd.age;
owner = "nginx"; owner = "nginx";
}; };
loki-environment.file = ../../../secrets/loki-environment.age; loki-environment.file = ../../../secrets/loki-environment.age;
@ -93,7 +93,7 @@ in
forceSSL = true; forceSSL = true;
locations."/loki/api/v1/push" = { locations."/loki/api/v1/push" = {
proxyPass = "http://localhost:${toString config.services.loki.configuration.server.http_listen_port}"; proxyPass = "http://localhost:${toString config.services.loki.configuration.server.http_listen_port}";
basicAuthFile = config.age.secrets.loki-htpasswd.path; basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
}; };
}; };
}; };

View file

@ -0,0 +1,92 @@
# Mimir (metrics store + ruler + Alertmanager) behind nginx, replacing the
# previous central Prometheus scraper: agents now push to the /api/v1/push
# endpoint exposed below.
{
config,
lib,
pkgs,
...
}:
let
# NOTE(review): option path/description still say "Prometheus scraper"
# although the module now configures Mimir — consider renaming.
cfg = config.bagel.services.prometheus;
inherit (lib) mkEnableOption mkIf;
# Single source of truth for the port used by nginx and the ruler below.
mimirPort = config.services.mimir.configuration.server.http_listen_port;
in
{
options.bagel.services.prometheus.enable = mkEnableOption "Prometheus scraper";
config = mkIf cfg.enable {
age.secrets = {
# htpasswd consumed by nginx to guard the push endpoint.
metrics-push-htpasswd = {
file = ../../../secrets/metrics-push-htpasswd.age;
owner = "nginx";
};
# Provides S3_KEY / S3_KEY_ID referenced via --config.expand-env below.
mimir-environment.file = ../../../secrets/mimir-environment.age;
};
services.mimir = {
enable = true;
# Lets ${...} placeholders in the config be filled from the environment.
extraFlags = ["--config.expand-env=true"];
configuration = {
# Run the Alertmanager component in addition to the default "all" set.
target = "all,alertmanager";
multitenancy_enabled = false;
common.storage = {
backend = "s3";
s3 = {
endpoint = "s3.delroth.net";
bucket_name = "bagel-mimir";
secret_access_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
access_key_id = "\${S3_KEY_ID}";
};
};
server = {
http_listen_port = 9009;
# Raised gRPC limits (100 MiB, 1000 streams) for large remote-write payloads.
grpc_server_max_recv_msg_size = 104857600;
grpc_server_max_send_msg_size = 104857600;
grpc_server_max_concurrent_streams = 1000;
};
# Single-node deployment: no replication.
ingester.ring.replication_factor = 1;
blocks_storage.backend = "s3";
ruler_storage = {
backend = "local";
# Mimir's local ruler storage expects one directory per tenant;
# with multitenancy off the tenant is "anonymous", hence the symlink.
local.directory = pkgs.runCommand "mimir-rules" {} ''
mkdir -p $out
ln -s ${./alerts} $out/anonymous
'';
};
alertmanager = {
sharding_ring.replication_factor = 1;
# Used because no per-tenant Alertmanager config is uploaded via the API.
fallback_config_file = pkgs.writers.writeYAML "alertmanager.yaml" {
route = {
group_by = ["alertname"];
receiver = "matrix";
};
receivers = [
{
# NOTE(review): receiver has no delivery config yet, so alerts
# are accepted but not sent anywhere — presumably a placeholder.
name = "matrix";
}
];
};
};
alertmanager_storage.backend = "filesystem";
# Point the ruler at the co-located Alertmanager component.
ruler.alertmanager_url = "http://localhost:${toString mimirPort}/alertmanager";
};
};
systemd.services.mimir.serviceConfig.EnvironmentFile = [ config.age.secrets.mimir-environment.path ];
services.nginx.virtualHosts."mimir.forkos.org" = {
enableACME = true;
forceSSL = true;
# Only the push endpoint is exposed publicly, behind basic auth.
locations."/api/v1/push" = {
proxyPass = "http://localhost:${toString mimirPort}";
basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
};
};
};
}

View file

@ -1,83 +0,0 @@
# (Deleted by this PR.) Central pull-based Prometheus scraper: walked all
# deployment nodes, scraped their declared exporters, and remote-wrote into
# a co-located Mimir. Replaced by per-host Grafana Agent push (agent.nix)
# plus a standalone mimir.nix.
{
config,
lib,
# `nodes` is injected by the deployment tool and exposes every machine's
# evaluated NixOS config, enabling this cross-host aggregation.
nodes,
...
}:
let
cfg = config.bagel.services.prometheus;
inherit (lib) mkEnableOption mkIf;
# Apply fn to every machine in the deployment.
forEachMachine = fn: map fn (builtins.attrValues nodes);
# Collect each machine's advertised scrape metadata; `or` guards machines
# that don't import the monitoring module.
allMetas = forEachMachine (machine: {
name = machine.config.networking.hostName;
address = machine.config.bagel.meta.monitoring.address or null;
exporters = machine.config.bagel.meta.monitoring.exporters or [];
});
# Only machines that declared both an address and at least one exporter.
scrapableMetas = builtins.filter (m: m.address != null && m.exporters != []) allMetas;
# One scrape job per machine, targeting all of its exporter ports.
toJobConfig = m: {
job_name = m.name;
static_configs = [
{ targets = map (e: m.address + ":" + (toString e.port)) m.exporters; }
];
};
jobConfigs = map toJobConfig scrapableMetas;
in
{
options.bagel.services.prometheus.enable = mkEnableOption "Prometheus scraper";
config = mkIf cfg.enable {
age.secrets.mimir-environment.file = ../../../secrets/mimir-environment.age;
services.prometheus = {
enable = true;
# Agent mode: scrape + remote-write only, no local TSDB queries.
enableAgentMode = true;
listenAddress = "127.0.0.1";
port = 9001;
globalConfig.scrape_interval = "15s";
scrapeConfigs = jobConfigs;
# Forward everything to the local Mimir instance configured below.
remoteWrite = [
{ url = "http://localhost:9009/api/v1/push"; }
];
};
services.mimir = {
enable = true;
extraFlags = ["--config.expand-env=true"];
configuration = {
multitenancy_enabled = false;
common.storage = {
backend = "s3";
s3 = {
endpoint = "s3.delroth.net";
bucket_name = "bagel-mimir";
secret_access_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
access_key_id = "\${S3_KEY_ID}";
};
};
server = {
http_listen_port = 9009;
grpc_server_max_recv_msg_size = 104857600;
grpc_server_max_send_msg_size = 104857600;
grpc_server_max_concurrent_streams = 1000;
};
ingester.ring.replication_factor = 1;
blocks_storage.backend = "s3";
ruler_storage = {
backend = "local";
local.directory = ./alerts;
};
};
};
systemd.services.mimir.serviceConfig.EnvironmentFile = [ config.age.secrets.mimir-environment.path ];
};
}

View file

@ -1,53 +0,0 @@
# (Deleted by this PR.) Promtail journald shipper, enabled by default on
# every host; replaced by the Grafana Agent's logs subsystem (agent.nix),
# which carries an equivalent journald scrape config.
{
config,
lib,
...
}:
let
cfg = config.bagel.monitoring.promtail;
inherit (lib) mkEnableOption mkIf;
in
{
# Default-on so all machines ship logs unless explicitly opted out.
options.bagel.monitoring.promtail.enable = (mkEnableOption "Promtail log export") // { default = true; };
config = mkIf cfg.enable {
age.secrets.promtail-password = {
file = ../../secrets/promtail-password.age;
owner = "promtail";
};
services.promtail = {
enable = true;
configuration = {
# No local HTTP/gRPC server needed; push-only operation.
server.disable = true;
clients = [
{
url = "https://loki.forkos.org/loki/api/v1/push";
basic_auth = {
username = "promtail";
password_file = config.age.secrets.promtail-password.path;
};
}
];
scrape_configs = [
{
job_name = "system";
journal = {
# Ignore journal entries older than 12h on first start.
max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
};
};
# Surface the originating systemd unit as a `unit` label.
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
];
}
];
};
};
};
}