Compare commits

...

9 commits

Author SHA1 Message Date
eyjhb
e18bdf5e3b
gerd.nix cleanup, maybe? 2025-03-14 17:10:18 +01:00
eyjhb
5e571b6fd9
prometheus: increased scrape_timeout 2025-03-14 17:09:52 +01:00
eyjhb
f0346a3c38
monitoring: added nextcloud as well 2025-03-14 17:09:42 +01:00
eyjhb
cb121c5369
monitoring: added services 2025-03-14 16:45:42 +01:00
eyjhb
efb17ea7fa
grafana+prometheus: initial setup 2025-03-14 16:45:16 +01:00
eyjhb
a10111a791
nextcloud: moved admin into own ldap group 2025-03-14 16:41:44 +01:00
eyjhb
4e58a128d8
uptime-kuma: rename patch 2025-03-14 16:40:51 +01:00
eyjhb
fc37d7a802
easy-zfs-mounts: do not rely on fileSystems, so it can be used with impermanence 2025-03-14 16:40:23 +01:00
eyjhb
d6be5fefea
nginx: block all /metrics endpoints 2025-03-14 16:40:19 +01:00
26 changed files with 489 additions and 51 deletions

View file

@ -24,14 +24,12 @@
./gerd/services/wger
./gerd/services/searx.nix
./gerd/services/miniflux.nix
./gerd/services/matrix
./gerd/services/uptime-kuma.nix
./gerd/services/rallly
./gerd/services/notify
./gerd/services/monitoring
];
networking.hostName = "gerd";
@ -49,6 +47,8 @@
"safe/svcs/stalwart" = { mountpoint = "/srv/stalwart"; extra.options.quota = "5G"; };
"safe/svcs/synapse" = { mountpoint = "/srv/synapse"; extra.options.quota = "5G"; };
"safe/svcs/wger" = { mountpoint = "/srv/wger"; extra.options.quota = "5G"; };
"safe/svcs/prometheus" = { mountpoint = "/srv/prometheus"; extra.options.quota = "5G"; };
"safe/svcs/postgresql" = { mountpoint = "/srv/postgresql"; extra.options.quota = "5G"; };
"backup/postgresql" = { mountpoint = "/media/backup/postgresqlbackup"; extra.options.quota = "5G"; };
};

View file

@ -275,7 +275,7 @@ in {
user_id = name;
display_name = name; # required for nextcloud
membermail = mkProvisionEmail name;
groups = [ lconfig.groups.admin lconfig.groups.member ];
groups = with lconfig.groups; [ admin nextcloud_admin grafana_admin member ];
membermaildiskquota = 100*1024*1024; # mb
nextcloudquota = 100*1024*1024; # mb
});

View file

@ -162,5 +162,6 @@ in {
${pythonEnv}/bin/python -m bootstrap.main ${configFile}
'';
};
systemd.services.lldap.restartTriggers = [ configFile ];
};
}

View file

@ -36,6 +36,8 @@
"base_member" = {};
"system_service" = {};
"system_mail" = {};
"nextcloud_admin" = {};
"grafana_admin" = {};
};
# attributes

View file

@ -0,0 +1,18 @@
{
  # Entry point for the monitoring stack: the Grafana + Prometheus core,
  # plus one mon-<service>.nix module per scraped service/exporter.
  imports = [
    ./grafana.nix
    ./prometheus.nix
    ./mon-postgres.nix
    ./mon-stalwart.nix
    ./mon-authelia.nix
    ./mon-matrix-synapse.nix
    ./mon-zfs.nix
    ./mon-miniflux.nix
    ./mon-hedgedoc.nix
    ./mon-forgejo.nix
    ./mon-uptime-kuma.nix
    ./mon-searx.nix
    ./mon-nextcloud.nix
  ];
}

View file

@ -0,0 +1,91 @@
{ config, ... }:
let
  svc_domain = "grafana.${config.mine.shared.settings.domain}";
  auth_domain = config.mine.shared.settings.authelia.domain;
  # reuse the user the grafana unit runs as for secret/file ownership
  grafana_user = config.systemd.services.grafana.serviceConfig.User;
in {
  services.grafana = {
    enable = true;
    settings = {
      server = {
        http_addr = "127.0.0.1";
        http_port = 3010;
        root_url = "https://${svc_domain}";
      };
      # only allow sign-in with oauth (Authelia); hide the local login form
      auth.disable_login_form = true;
      "auth.generic_oauth" = {
        enabled = true;
        name = "Authelia";
        icon = "signin";
        client_id = "grafana";
        # secret is read from the agenix-managed file at runtime
        client_secret = "$__file{${config.age.secrets.grafana-authelia-secret.path}}";
        scopes = "openid profile email groups";
        empty_scopes = false;
        auth_url = "https://${auth_domain}/api/oidc/authorization";
        token_url = "https://${auth_domain}/api/oidc/token";
        api_url = "https://${auth_domain}/api/oidc/userinfo";
        login_attribute_path = "preferred_username";
        groups_attribute_path = "groups";
        name_attribute_path = "name";
        use_pkce = true;
        # map ldap group membership to the grafana role (admin > editor > viewer)
        role_attribute_path = config.mine.shared.lib.ldap.mkScope (lconfig: llib:
          "contains(groups, '${lconfig.groups.grafana_admin}') && 'Admin' || contains(groups, 'editor') && 'Editor' || 'Viewer'"
        );
      };
    };
    provision = {
      enable = true;
      # dashboards.settings.providers = [{
      #   name = "my dashboards";
      #   options.path = "/etc/grafana-dashboards";
      # }];
      datasources.settings.datasources = [
        {
          name = "Prometheus";
          type = "prometheus";
          url = "http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}";
        }
      ];
    };
  };
  # register Grafana as an OIDC client in Authelia
  services.authelia.instances.main.settings.identity_providers.oidc.clients = [{
    client_id = "grafana";
    client_name = "Grafana";
    client_secret = "$pbkdf2-sha512$310000$81MV1.67njuS/5H2UvVsnA$vaNO3/tzVA76Jho4ngS.xFjDuYn1sDn/9qo7cD0ueMnVvzaoJj00ND5wCGzVSUnvLuxNE/enC1K5r7xKAe/Hrg";
    redirect_uris = [ "https://${svc_domain}/login/generic_oauth" ];
    scopes = [
      "openid"
      "email"
      "profile"
      "groups"
    ];
  }];
  # keep grafana state across reboots (impermanence) and fix ownership
  environment.persistence.root.directories = [
    config.services.grafana.dataDir
  ];
  systemd.tmpfiles.rules = [
    "Z ${config.services.grafana.dataDir} 0770 ${grafana_user} ${grafana_user} -"
  ];
  age.secrets.grafana-authelia-secret.owner = grafana_user;
  services.nginx.virtualHosts."${svc_domain}" = {
    forceSSL = true;
    enableACME = true;
    # proxy to the exact address grafana binds; using "localhost" can resolve
    # to ::1 while grafana only listens on 127.0.0.1
    locations."/".proxyPass = "http://${config.services.grafana.settings.server.http_addr}:${toString config.services.grafana.settings.server.http_port}";
  };
}

View file

@ -0,0 +1,23 @@
{ config, lib, ... }:
{
  # expose Authelia's Prometheus telemetry endpoint
  services.authelia.instances.main.settings.telemetry.metrics.enabled = true;
  services.prometheus.scrapeConfigs = [{
    job_name = "authelia";
    static_configs = [{
      # the configured address is of the form "tcp://host:port";
      # strip the scheme to get a prometheus target
      targets = [ (lib.removePrefix "tcp://" config.services.authelia.instances.main.settings.telemetry.metrics.address) ];
    }];
    # namespace every scraped metric with an authelia_ prefix
    metric_relabel_configs = [{
      source_labels = [ "__name__" ];
      target_label = "__name__";
      replacement = "authelia_$1";
    }];
  }];
}

View file

@ -0,0 +1,14 @@
{ config, ... }:
{
  # enable Forgejo's built-in /metrics endpoint
  services.forgejo.settings.metrics.ENABLED = true;
  services.prometheus.scrapeConfigs = [
    {
      job_name = "forgejo";
      static_configs = [{
        # the app.ini key is HTTP_PORT; "HTTPPORT" is not a defined
        # attribute and would fail evaluation
        targets = [ "localhost:${toString config.services.forgejo.settings.server.HTTP_PORT}" ];
      }];
    }
  ];
}

View file

@ -0,0 +1,18 @@
{ config, ... }:
{
  # stats API is enabled by default anyways
  # TODO(eyJhb): disable exposing this to the WORLD
  services.hedgedoc.settings.enableStatsApi = true;
  services.prometheus.scrapeConfigs = [{
    job_name = "hedgedoc";
    static_configs = [{
      # scrape hedgedoc on its own configured port
      targets = [ "localhost:${toString config.services.hedgedoc.settings.port}" ];
    }];
  }];
}

View file

@ -0,0 +1,27 @@
let
  # dedicated port for synapse's metrics listener
  metricsPort = 9734;
in {
  services.matrix-synapse.settings = {
    enable_metrics = true;
    # extra listener that serves only the metrics resource,
    # plain HTTP on loopback
    listeners = [{
      port = metricsPort;
      type = "metrics";
      bind_addresses = [ "localhost" ];
      tls = false;
      resources = [];
    }];
  };
  services.prometheus.scrapeConfigs = [{
    job_name = "matrix-synapse";
    static_configs = [{
      targets = [ "localhost:${toString metricsPort}" ];
    }];
  }];
}

View file

@ -0,0 +1,16 @@
{ config, ... }:
{
  # tell miniflux to expose its /metrics collector
  services.miniflux.config.METRICS_COLLECTOR = 1;
  services.prometheus.scrapeConfigs = [{
    job_name = "miniflux";
    static_configs = [{
      # scrape miniflux directly on its configured listen address
      targets = [ config.services.miniflux.config.LISTEN_ADDR ];
    }];
  }];
}

View file

@ -0,0 +1,45 @@
{ config, lib, pkgs, ... }:
let
  # occ bin
  occ = config.services.nextcloud.occ + "/bin/nextcloud-occ";
  # installs the serverinfo API token into nextcloud's app config; the token
  # is handed to the unit via systemd LoadCredential (see below)
  nextcloudSetupServerinfoToken = pkgs.writeShellScript "nextcloud-setup-serverinfo-token.sh" ''
    # set serverinfo_token; quote the credential path, and silence output so
    # the secret never ends up in the journal
    SERVERINFO_TOKEN="$(cat "$CREDENTIALS_DIRECTORY/nextcloud-serverinfo-token")"
    ${occ} config:app:set serverinfo token --value "$SERVERINFO_TOKEN" > /dev/null 2>&1
  '';
in {
  systemd.services.nextcloud-setup = {
    # runs this after all the main nextcloud-setup stuff
    script = lib.mkAfter ''
      ${nextcloudSetupServerinfoToken}
    '';
    # setup credentials for service
    serviceConfig.LoadCredential = [
      "nextcloud-serverinfo-token:${config.age.secrets.nextcloud-serverinfo-token.path}"
    ];
  };
  # exporter authenticates against the serverinfo app with the same token
  services.prometheus.exporters.nextcloud = {
    enable = true;
    listenAddress = "localhost";
    tokenFile = config.age.secrets.nextcloud-serverinfo-token.path;
    url = let
      scheme = if config.services.nextcloud.https then "https" else "http";
    in "${scheme}://${config.services.nextcloud.hostName}";
  };
  # setup permissions
  age.secrets.nextcloud-serverinfo-token.owner = config.services.prometheus.exporters.nextcloud.user;
  services.prometheus.scrapeConfigs = [
    {
      job_name = "nextcloud";
      static_configs = [{
        targets = [ "localhost:${toString config.services.prometheus.exporters.nextcloud.port}" ];
      }];
    }
  ];
}

View file

@ -0,0 +1,34 @@
{ config, pkgs, ... }:
{
  services.prometheus.exporters.postgres = {
    enable = true;
    listenAddress = "localhost";
    # connect over the local socket as superuser; avoids managing a
    # dedicated monitoring role/password
    runAsLocalSuperUser = true;
    extraFlags = let
      # extra query: per-database on-disk size (pg_database_size)
      # NOTE: typo fixed in the derivation name ("prometehus" -> "prometheus")
      extraQuery = pkgs.writeText "prometheus-postgres-query.yaml" ''
        pg_database:
          query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size FROM pg_database"
          metrics:
            - datname:
                usage: "LABEL"
                description: "Name of the database"
            - size:
                usage: "GAUGE"
                description: "Disk space used by the database"
      '';
    in [
      "--extend.query-path=${extraQuery}"
    ];
  };
  services.prometheus.scrapeConfigs = [
    {
      job_name = "postgres";
      static_configs = [{
        targets = [ "localhost:${toString config.services.prometheus.exporters.postgres.port}" ];
      }];
    }
  ];
}

View file

@ -0,0 +1,16 @@
{ config, ... }:
{
  # searx gates its /metrics endpoint behind basic auth where the
  # open_metrics value acts as the password (username is ignored).
  # NOTE(review): this value is written verbatim into the world-readable
  # nix store and the generated searx/prometheus configs — acceptable only
  # because the endpoint is not reachable from outside this host.
  services.searx.settings.general.open_metrics = "thisreallydoesnotmatterasitisnotaccessiblefromoutsideofthisserver";
  services.prometheus.scrapeConfigs = [
    {
      job_name = "searx";
      # username can be anything; only the password is checked
      basic_auth.username = "canbeanything";
      basic_auth.password = config.services.searx.settings.general.open_metrics;
      static_configs = [{
        # scrape searx via its uwsgi http listener
        targets = [ config.services.searx.uwsgiConfig.http ];
      }];
    }
  ];
}

View file

@ -0,0 +1,22 @@
{ config, ... }:
{
  # turn on stalwart's prometheus metrics endpoint
  services.stalwart-mail.settings.metrics.prometheus.enable = true;
  services.prometheus.scrapeConfigs = [{
    job_name = "stalwart";
    # stalwart serves metrics on the management port under this path
    metrics_path = "/metrics/prometheus";
    static_configs = [{
      targets = [ "localhost:${toString config.mine.shared.settings.mail.ports.http_management}" ];
    }];
    # namespace every scraped metric with a stalwart_ prefix
    metric_relabel_configs = [{
      source_labels = [ "__name__" ];
      target_label = "__name__";
      replacement = "stalwart_$1";
    }];
  }];
}

View file

@ -0,0 +1,12 @@
{ config, ... }:
{
  # uptime-kuma exposes /metrics on its normal web port
  # (basic auth on that endpoint is patched out elsewhere in this config)
  services.prometheus.scrapeConfigs = [{
    job_name = "uptime-kuma";
    static_configs = [{
      targets = [ "localhost:${toString config.services.uptime-kuma.settings.PORT}" ];
    }];
  }];
}

View file

@ -0,0 +1,19 @@
# NOTE: dropped the unused `pkgs` module argument
{ config, ... }:
{
  services.prometheus.exporters.zfs = {
    enable = true;
    listenAddress = "localhost";
    # also export per-snapshot dataset metrics
    extraFlags = [ "--collector.dataset-snapshot" ];
  };
  services.prometheus.scrapeConfigs = [
    {
      job_name = "zfs";
      static_configs = [{
        targets = [ "localhost:${toString config.services.prometheus.exporters.zfs.port}" ];
      }];
    }
  ];
}

View file

@ -0,0 +1,28 @@
{ config, ... }:
let
  # reuse the user the prometheus unit runs as for the tmpfiles ownership rule
  prometheus_user = config.systemd.services.prometheus.serviceConfig.User;
  # stateDir is relative to /var/lib (NixOS prometheus module convention)
  fullDataDirPath = "/var/lib/${config.services.prometheus.stateDir}";
  # mountpoint of the dedicated zfs dataset backing prometheus state
  filesetPath = config.mine.zfsMounts."rpool/safe/svcs/prometheus";
in {
  services.prometheus = {
    enable = true;
    globalConfig.scrape_interval = "10s";
    # NOTE(review): timeout equal to the interval — a slow scrape can eat the
    # whole interval; intentional per the "increased scrape_timeout" commit
    globalConfig.scrape_timeout = "10s";
    listenAddress = "localhost";
    # default is 15 days, we just set it to 14 to be explicit
    retentionTime = "14d";
  };
  # the persistence dataset must be mounted before impermanence binds it
  fileSystems."${filesetPath}".neededForBoot = true;
  environment.persistence."${filesetPath}".directories = [
    fullDataDirPath
  ];
  # recursively fix ownership/permissions of the persisted data dir
  systemd.tmpfiles.rules = [
    "Z ${fullDataDirPath} 0770 ${prometheus_user} ${prometheus_user} -"
  ];
}

View file

@ -49,7 +49,7 @@ let
ldapGroupFilter = config.mine.shared.lib.ldap.mkFilter (lconfig: llib:
llib.mkAnd [
(llib.mkOC lconfig.oc.groupOfUniqueNames)
(llib.mkOr [ "cn=${lconfig.groups.admin}" "cn=${lconfig.groups.member}"])
(llib.mkOr [ "cn=${lconfig.groups.nextcloud_admin}" "cn=${lconfig.groups.member}"])
]
);
ldapGroupFilterGroups = "admin;user";
@ -86,7 +86,7 @@ let
done
# promote ldap admin group to admins
${occ} ldap:promote-group ${config.mine.shared.settings.ldap.groups.admin} --yes -n
${occ} ldap:promote-group ${config.mine.shared.settings.ldap.groups.nextcloud_admin} --yes -n
'';
# script for resetting nextcloud admin password on each startup

View file

@ -23,7 +23,7 @@ in {
hash = npmDepsHash;
};
patches = [
(pkgs.writeText "authelia.patch" ''
(pkgs.writeText "uptime-kuma-database-writeable.patch" ''
diff --git a/server/database.js b/server/database.js
index 3374aff9..9e890d28 100644
--- a/server/database.js
@ -37,6 +37,22 @@ in {
const Dialect = require("knex/lib/dialects/sqlite3/index.js");
'')
# TODO(eyJhb): do we really want this?
(pkgs.writeText "uptime-kuma-disable-metrics-auth.patch" ''
diff --git a/server/server.js b/server/server.js
index db58ae82..d650a42a 100644
--- a/server/server.js
+++ b/server/server.js
@@ -292,7 +292,7 @@ let needSetup = false;
// Prometheus API metrics /metrics
// With Basic Auth using the first user's username/password
- app.get("/metrics", apiAuth, prometheusAPIMetrics());
+ app.use("/metrics", prometheusAPIMetrics());
app.use("/", expressStaticGzip("dist", {
enableBrotli: true,
'')
];
});
};

View file

@ -34,6 +34,7 @@
nextcloud-admin-pass.file = ./nextcloud/admin-pass.age;
nextcloud-secrets.file = ./nextcloud/secrets.age;
nextcloud-smtp-pass.file = ./nextcloud/smtp-pass.age;
nextcloud-serverinfo-token.file = ./nextcloud/serverinfo-token.age;
# stalwart
stalwart-admin-fallback-password.file = ./stalwart/admin-fallback-password.age;
@ -62,6 +63,9 @@
# notify
notify-ldap-pass.file = ./notify/ldap-pass.age;
notify-env.file = ./notify/env.age;
# grafana
grafana-authelia-secret.file = ./grafana/authelia-secret.age;
};
users.groups.secrets-lldap-bind-user-pass = {};

View file

@ -0,0 +1,11 @@
age-encryption.org/v1
-> ssh-ed25519 QSDXqg mcA7aWulfqHTARfxzs9ECZaJRMZKLxZgl4uYXrsL6Tk
IOKrdtTiG/Wc8qQb5zip1F3B4BHAGkEw8hjz22UY80k
-> X25519 kqD2VC9Vw/2rrd/C1TR5He/78anx3UYXNbjs0vNXCz4
ZYenf1LK+YAlil/oiZIfGGyaK9S6pt8LLpCbmlaKn9s
-> ssh-ed25519 n8n9DQ PlW/1TA71RhclXIC2RlKUUOnqOq3qWy8yshqgM3Nu10
2j6c3UjFc/RJJrqeWIezHx53DcPHFPi5a8WXnyqkXhU
-> ssh-ed25519 BTp6UA n2idpPd9RFDbzvD2svo3A0NU7kx1nUEYzwFs0gpxn3Q
/4F5l1dXBvF0nWXvT8nxPPCAxB4heeUMSBrGMY3gfng
--- 7xw3+Ket2jYmH8wsoG2ivWUYLkyoR0et5FELrn+zzMo
9XzvèäJºEŠó«y⺈†è}\šÙ©‰ï\xÓºeè”11ûõ¯ƒô7XÒÑb%„á Õ˜.…ïj‰!‹Ä6œšBÃ[/ÆÀx!8Àâ‹ÕÔÿÿÍ´¤'2ŠvRúž§4W:]k

Binary file not shown.

View file

@ -44,6 +44,7 @@ in
"nextcloud/admin-pass.age".publicKeys = defaultAccess;
"nextcloud/secrets.age".publicKeys = defaultAccess;
"nextcloud/smtp-pass.age".publicKeys = defaultAccess;
"nextcloud/serverinfo-token.age".publicKeys = defaultAccess;
# mailserver/stalwart
"stalwart/admin-fallback-password.age".publicKeys = defaultAccess;
@ -72,4 +73,7 @@ in
# notify
"notify/ldap-pass.age".publicKeys = defaultAccess;
"notify/env.age".publicKeys = defaultAccess;
# grafana
"grafana/authelia-secret.age".publicKeys = defaultAccess;
}

View file

@ -10,47 +10,58 @@ let
-out "$out/ca.pem" -keyout "$out/ca.key"
'';
in {
services.nginx = {
enable = true;
recommendedOptimisation = true;
recommendedTlsSettings = true;
recommendedGzipSettings = true;
# recommendedBrotliSettings = true;
recommendedProxySettings = true;
# only allow PFS-enabled ciphers with AES256
sslCiphers = "AES256+EECDH:AES256+EDH:!aNULL";
# disable access logs
commonHttpConfig= ''
access_log off;
'';
# setup a default site
virtualHosts.default = {
default = lib.mkDefault true;
addSSL = true;
sslCertificateKey = "${snakeOilCa}/ca.key";
sslCertificate = "${snakeOilCa}/ca.pem";
root = pkgs.writeTextDir "index.html" ''
<html>
<head>
<title>Nothing to see</title>
</head>
<body>
<p>Like I said, nothing to see here</p>
</body>
</html>
'';
};
# block all /metrics endpoints
options.services.nginx.virtualHosts = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule {
config.locations."/metrics" = lib.mkDefault {
extraConfig = "deny all;";
};
});
};
networking.firewall = {
allowedTCPPorts = [80 443];
allowedUDPPorts = [443];
config = {
services.nginx = {
enable = true;
recommendedOptimisation = true;
recommendedTlsSettings = true;
recommendedGzipSettings = true;
# recommendedBrotliSettings = true;
recommendedProxySettings = true;
# only allow PFS-enabled ciphers with AES256
sslCiphers = "AES256+EECDH:AES256+EDH:!aNULL";
# disable access logs
commonHttpConfig= ''
access_log off;
'';
# setup a default site
virtualHosts.default = {
default = lib.mkDefault true;
addSSL = true;
sslCertificateKey = "${snakeOilCa}/ca.key";
sslCertificate = "${snakeOilCa}/ca.pem";
root = pkgs.writeTextDir "index.html" ''
<html>
<head>
<title>Nothing to see</title>
</head>
<body>
<p>Like I said, nothing to see here</p>
</body>
</html>
'';
};
};
networking.firewall = {
allowedTCPPorts = [80 443];
allowedUDPPorts = [443];
};
};
}

View file

@ -10,9 +10,15 @@ in {
default = {};
};
# config = {
# mine.zfsMounts = let
# zfsFilesystems = lib.filterAttrs (_: v: v.fsType == "zfs") config.fileSystems;
# in lib.mapAttrs' (_: v: lib.nameValuePair v.device v.mountPoint) zfsFilesystems;
# };
# TODO: fix this better. We just do this, so we do not rely on fileSystems, otherwise we cannot
# use this with impermanence
config = {
mine.zfsMounts = let
zfsFilesystems = lib.filterAttrs (_: v: v.fsType == "zfs") config.fileSystems;
in lib.mapAttrs' (_: v: lib.nameValuePair v.device v.mountPoint) zfsFilesystems;
mine.zfsMounts = lib.mapAttrs' (n: v: lib.nameValuePair ("rpool/" + n) v.mountpoint) config.mine.disks.pools.rpool.datasets;
};
}