Compare commits
No commits in common. "e18bdf5e3bb52656609a20bb5481028936d367b0" and "cad1ac566a436fafcbfaae36d01bd572d4f6a0f6" have entirely different histories.
e18bdf5e3b...cad1ac566a
26 changed files with 54 additions and 492 deletions

@@ -19,17 +19,19 @@
     ./gerd/services/murmur.nix
     ./gerd/services/hedgedoc.nix
     ./gerd/services/cyberchef.nix
     ./gerd/services/nextcloud.nix
     ./gerd/services/stalwart
     ./gerd/services/wger
     ./gerd/services/searx.nix
     ./gerd/services/miniflux.nix
-    ./gerd/services/monitoring
     ./gerd/services/matrix
     ./gerd/services/uptime-kuma.nix
     ./gerd/services/rallly
     ./gerd/services/notify
   ];

   networking.hostName = "gerd";

@@ -47,8 +49,6 @@
   "safe/svcs/stalwart" = { mountpoint = "/srv/stalwart"; extra.options.quota = "5G"; };
   "safe/svcs/synapse" = { mountpoint = "/srv/synapse"; extra.options.quota = "5G"; };
   "safe/svcs/wger" = { mountpoint = "/srv/wger"; extra.options.quota = "5G"; };
-  "safe/svcs/prometheus" = { mountpoint = "/srv/prometheus"; extra.options.quota = "5G"; };
-
   "safe/svcs/postgresql" = { mountpoint = "/srv/postgresql"; extra.options.quota = "5G"; };
   "backup/postgresql" = { mountpoint = "/media/backup/postgresqlbackup"; extra.options.quota = "5G"; };
 };

@@ -275,7 +275,7 @@ in {
   user_id = name;
   display_name = name; # required for nextcloud
   membermail = mkProvisionEmail name;
-  groups = with lconfig.groups; [ admin nextcloud_admin grafana_admin member ];
+  groups = [ lconfig.groups.admin lconfig.groups.member ];
   membermaildiskquota = 100*1024*1024; # mb
   nextcloudquota = 100*1024*1024; # mb
 });

@@ -162,6 +162,5 @@ in {
       ${pythonEnv}/bin/python -m bootstrap.main ${configFile}
     '';
   };
-  systemd.services.lldap.restartTriggers = [ configFile ];
 };
}

@@ -36,8 +36,6 @@
   "base_member" = {};
   "system_service" = {};
   "system_mail" = {};
-  "nextcloud_admin" = {};
-  "grafana_admin" = {};
 };

 # attributes

@@ -1,18 +0,0 @@
{
  imports = [
    ./grafana.nix
    ./prometheus.nix

    ./mon-postgres.nix
    ./mon-stalwart.nix
    ./mon-authelia.nix
    ./mon-matrix-synapse.nix
    ./mon-zfs.nix
    ./mon-miniflux.nix
    ./mon-hedgedoc.nix
    ./mon-forgejo.nix
    ./mon-uptime-kuma.nix
    ./mon-searx.nix
    ./mon-nextcloud.nix
  ];
}

@@ -1,91 +0,0 @@
{ config, ... }:

let
  svc_domain = "grafana.${config.mine.shared.settings.domain}";

  auth_domain = config.mine.shared.settings.authelia.domain;

  grafana_user = config.systemd.services.grafana.serviceConfig.User;
in {
  services.grafana = {
    enable = true;
    settings = {
      server = {
        http_addr = "127.0.0.1";
        http_port = 3010;
        root_url = "https://${svc_domain}";
      };

      # only allow signin with oauth
      auth.disable_login_form = true;

      "auth.generic_oauth" = {
        enabled = true;
        name = "Authelia";
        icon = "signin";
        client_id = "grafana";
        client_secret = "$__file{${config.age.secrets.grafana-authelia-secret.path}}";
        scopes = "openid profile email groups";
        empty_scopes = false;
        auth_url = "https://${auth_domain}/api/oidc/authorization";
        token_url = "https://${auth_domain}/api/oidc/token";
        api_url = "https://${auth_domain}/api/oidc/userinfo";
        login_attribute_path = "preferred_username";
        groups_attribute_path = "groups";
        name_attribute_path = "name";
        use_pkce = true;

        role_attribute_path = config.mine.shared.lib.ldap.mkScope (lconfig: llib:
          "contains(groups, '${lconfig.groups.grafana_admin}') && 'Admin' || contains(groups, 'editor') && 'Editor' || 'Viewer'"
        );
      };
    };

    provision = {
      enable = true;

      # dashboards.settings.providers = [{
      #   name = "my dashboards";
      #   options.path = "/etc/grafana-dashboards";
      # }];

      datasources.settings.datasources = [
        {
          name = "Prometheus";
          type = "prometheus";
          url = "http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}";
        }
      ];
    };
  };

  # authelia
  services.authelia.instances.main.settings.identity_providers.oidc.clients = [{
    client_id = "grafana";
    client_name = "Grafana";
    client_secret = "$pbkdf2-sha512$310000$81MV1.67njuS/5H2UvVsnA$vaNO3/tzVA76Jho4ngS.xFjDuYn1sDn/9qo7cD0ueMnVvzaoJj00ND5wCGzVSUnvLuxNE/enC1K5r7xKAe/Hrg";
    redirect_uris = [ "https://${svc_domain}/login/generic_oauth" ];
    scopes = [
      "openid"
      "email"
      "profile"
      "groups"
    ];
  }];

  environment.persistence.root.directories = [
    config.services.grafana.dataDir
  ];

  systemd.tmpfiles.rules = [
    "Z ${config.services.grafana.dataDir} 0770 ${grafana_user} ${grafana_user} -"
  ];

  age.secrets.grafana-authelia-secret.owner = grafana_user;

  services.nginx.virtualHosts."${svc_domain}" = {
    forceSSL = true;
    enableACME = true;
    locations."/".proxyPass = "http://localhost:${builtins.toString config.services.grafana.settings.server.http_port}";
  };
}

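Note on the deleted grafana.nix above: role_attribute_path is evaluated by Grafana as a JMESPath expression against the OIDC userinfo claims, and the first matching branch wins. The repo builds the string through its own mkScope helper; assuming that helper only substitutes LDAP group names, the rendered setting would look roughly like this sketch (the literal group names are placeholders):

    {
      # Minimal sketch of the rendered value, without the mkScope helper.
      # "grafana_admin" and "editor" are assumed group names.
      services.grafana.settings."auth.generic_oauth".role_attribute_path =
        "contains(groups, 'grafana_admin') && 'Admin' || contains(groups, 'editor') && 'Editor' || 'Viewer'";
    }
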
@@ -1,23 +0,0 @@
{ config, lib, ... }:

{
  services.authelia.instances.main.settings = {
    telemetry.metrics = {
      enabled = true;
    };
  };

  services.prometheus.scrapeConfigs = [
    {
      job_name = "authelia";
      static_configs = [{
        targets = [ (lib.removePrefix "tcp://" config.services.authelia.instances.main.settings.telemetry.metrics.address) ];
      }];
      metric_relabel_configs = [{
        source_labels = [ "__name__" ];
        target_label = "__name__";
        replacement = "authelia_$1";
      }];
    }
  ];
}

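The metric_relabel_configs block above leans on Prometheus defaults: when no regex is given, the default "(.*)" matches the whole joined source value, so $1 in the replacement is the original metric name and every series from this job is prefixed before storage. The same rule with the default written out, as a sketch:

    {
      metric_relabel_configs = [{
        source_labels = [ "__name__" ];
        regex = "(.*)";              # Prometheus default, shown explicitly
        target_label = "__name__";
        # e.g. request_duration_seconds -> authelia_request_duration_seconds
        replacement = "authelia_$1";
      }];
    }
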
@@ -1,14 +0,0 @@
{ config, ... }:

{
  services.forgejo.settings.metrics.ENABLED = true;

  services.prometheus.scrapeConfigs = [
    {
      job_name = "forgejo";
      static_configs = [{
        targets = [ "localhost:${builtins.toString config.services.forgejo.settings.server.HTTP_PORT}" ];
      }];
    }
  ];
}

@@ -1,18 +0,0 @@
{ config, ... }:

{
  services.hedgedoc.settings = {
    # enabled by default anyways
    # TODO(eyJhb): disable exposing this to the WORLD
    enableStatsApi = true;
  };

  services.prometheus.scrapeConfigs = [
    {
      job_name = "hedgedoc";
      static_configs = [{
        targets = [ "localhost:${builtins.toString config.services.hedgedoc.settings.port}" ];
      }];
    }
  ];
}

@@ -1,27 +0,0 @@
let
  metrics_port = 9734;
in {
  services.matrix-synapse = {
    settings = {
      enable_metrics = true;
      listeners = [
        {
          port = metrics_port;
          type = "metrics";
          bind_addresses = [ "localhost" ];
          tls = false;
          resources = [];
        }
      ];
    };
  };

  services.prometheus.scrapeConfigs = [
    {
      job_name = "matrix-synapse";
      static_configs = [{
        targets = [ "localhost:${builtins.toString metrics_port}" ];
      }];
    }
  ];
}

@@ -1,16 +0,0 @@
{ config, ... }:

{
  services.miniflux.config = {
    METRICS_COLLECTOR = 1;
  };

  services.prometheus.scrapeConfigs = [
    {
      job_name = "miniflux";
      static_configs = [{
        targets = [ config.services.miniflux.config.LISTEN_ADDR ];
      }];
    }
  ];
}

@@ -1,45 +0,0 @@
{ config, lib, pkgs, ... }:

let
  # occ bin
  occ = config.services.nextcloud.occ + "/bin/nextcloud-occ";

  nextcloudSetupServerinfoToken = pkgs.writeShellScript "nextcloud-setup-serverinfo-token.sh" ''
    # set serverinfo_token
    SERVERINFO_TOKEN="$(cat $CREDENTIALS_DIRECTORY/nextcloud-serverinfo-token)"
    ${occ} config:app:set serverinfo token --value "$SERVERINFO_TOKEN" > /dev/null 2>&1
  '';
in {
  systemd.services.nextcloud-setup = {
    # runs this after all the main nextcloud-setup stuff
    script = lib.mkAfter ''
      ${nextcloudSetupServerinfoToken}
    '';

    # setup credentials for service
    serviceConfig.LoadCredential = [
      "nextcloud-serverinfo-token:${config.age.secrets.nextcloud-serverinfo-token.path}"
    ];
  };

  services.prometheus.exporters.nextcloud = {
    enable = true;
    listenAddress = "localhost";
    tokenFile = config.age.secrets.nextcloud-serverinfo-token.path;
    url = let
      scheme = if config.services.nextcloud.https then "https" else "http";
    in "${scheme}://${config.services.nextcloud.hostName}";
  };

  # setup permissions
  age.secrets.nextcloud-serverinfo-token.owner = config.services.prometheus.exporters.nextcloud.user;

  services.prometheus.scrapeConfigs = [
    {
      job_name = "nextcloud";
      static_configs = [{
        targets = [ "localhost:${builtins.toString config.services.prometheus.exporters.nextcloud.port}" ];
      }];
    }
  ];
}

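The LoadCredential wiring above is the standard systemd pattern for handing a secret to a unit's script: each credential is exposed read-only as a file under $CREDENTIALS_DIRECTORY, visible only to that unit. A minimal standalone sketch of the same pattern; the unit name and secret path are assumptions:

    {
      systemd.services.example = {
        # "my-token" (the part before ":") becomes the file name in
        # $CREDENTIALS_DIRECTORY; the path after ":" is the source.
        serviceConfig.LoadCredential = [ "my-token:/run/secrets/my-token" ];
        script = ''
          TOKEN="$(cat "$CREDENTIALS_DIRECTORY/my-token")"
          echo "token has ''${#TOKEN} characters"
        '';
      };
    }
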
@@ -1,34 +0,0 @@
{ config, pkgs, ... }:

{
  services.prometheus.exporters.postgres = {
    enable = true;
    listenAddress = "localhost";
    runAsLocalSuperUser = true;

    extraFlags = let
      extraQuery = pkgs.writeText "prometheus-postgres-query.yaml" ''
        pg_database:
          query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size FROM pg_database"
          metrics:
            - datname:
                usage: "LABEL"
                description: "Name of the database"
            - size:
                usage: "GAUGE"
                description: "Disk space used by the database"
      '';
    in [
      "--extend.query-path=${extraQuery}"
    ];
  };

  services.prometheus.scrapeConfigs = [
    {
      job_name = "postgres";
      static_configs = [{
        targets = [ "localhost:${toString config.services.prometheus.exporters.postgres.port}" ];
      }];
    }
  ];
}

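For the custom query file above: postgres_exporter names each emitted series "<top-level key>_<column>", attaching LABEL columns as Prometheus labels. So the pg_database query should surface roughly like this (database name hypothetical):

    # One GAUGE per row returned by the query:
    #   pg_database_size{datname="nextcloud"} 123456789
    # i.e. metric name = "pg_database" + "_" + "size", with the
    # "datname" LABEL column attached as a label.
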
@@ -1,16 +0,0 @@
{ config, ... }:

{
  services.searx.settings.general.open_metrics = "thisreallydoesnotmatterasitisnotaccessiblefromoutsideofthisserver";

  services.prometheus.scrapeConfigs = [
    {
      job_name = "searx";
      basic_auth.username = "canbeanything";
      basic_auth.password = config.services.searx.settings.general.open_metrics;
      static_configs = [{
        targets = [ config.services.searx.uwsgiConfig.http ];
      }];
    }
  ];
}

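Context for the open_metrics setting above: SearXNG serves /metrics only when the value is non-empty, and it then acts as the HTTP basic-auth password, with the username ignored. Pointing the scrape config's password back at the option (as the deleted file does) keeps the two in sync. A minimal sketch of just that coupling, with placeholder values:

    { config, ... }: {
      # The secret lives in one place; Prometheus reads it back out of config.
      services.searx.settings.general.open_metrics = "replace-with-a-long-random-string";
      services.prometheus.scrapeConfigs = [{
        job_name = "searx";
        basic_auth.username = "canbeanything";  # SearXNG ignores the username
        basic_auth.password = config.services.searx.settings.general.open_metrics;
        static_configs = [{ targets = [ "127.0.0.1:8888" ]; }];  # placeholder address
      }];
    }
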
@@ -1,22 +0,0 @@
{ config, ... }:

{
  services.stalwart-mail.settings = {
    metrics.prometheus.enable = true;
  };

  services.prometheus.scrapeConfigs = [
    {
      job_name = "stalwart";
      metrics_path = "/metrics/prometheus";
      static_configs = [{
        targets = [ "localhost:${toString config.mine.shared.settings.mail.ports.http_management}" ];
      }];
      metric_relabel_configs = [{
        source_labels = [ "__name__" ];
        target_label = "__name__";
        replacement = "stalwart_$1";
      }];
    }
  ];
}

@@ -1,12 +0,0 @@
{ config, ... }:

{
  services.prometheus.scrapeConfigs = [
    {
      job_name = "uptime-kuma";
      static_configs = [{
        targets = [ "localhost:${builtins.toString config.services.uptime-kuma.settings.PORT}" ];
      }];
    }
  ];
}

@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  services.prometheus.exporters.zfs = {
    enable = true;
    listenAddress = "localhost";

    extraFlags = [ "--collector.dataset-snapshot" ];
  };

  services.prometheus.scrapeConfigs = [
    {
      job_name = "zfs";
      static_configs = [{
        targets = [ "localhost:${toString config.services.prometheus.exporters.zfs.port}" ];
      }];
    }
  ];
}

@@ -1,28 +0,0 @@
{ config, ... }:

let
  prometheus_user = config.systemd.services.prometheus.serviceConfig.User;

  fullDataDirPath = "/var/lib/${config.services.prometheus.stateDir}";

  filesetPath = config.mine.zfsMounts."rpool/safe/svcs/prometheus";
in {
  services.prometheus = {
    enable = true;
    globalConfig.scrape_interval = "10s";
    globalConfig.scrape_timeout = "10s";
    listenAddress = "localhost";

    # default is 15 days, we just set it to 14 to be explicit
    retentionTime = "14d";
  };

  fileSystems."${filesetPath}".neededForBoot = true;
  environment.persistence."${filesetPath}".directories = [
    fullDataDirPath
  ];

  systemd.tmpfiles.rules = [
    "Z ${fullDataDirPath} 0770 ${prometheus_user} ${prometheus_user} -"
  ];
}

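On the tmpfiles rule in the deleted prometheus.nix: the "Z" type in systemd-tmpfiles recursively sets the owner, group, and mode of an existing path and everything below it; unlike "d" it does not create anything. That is how the persisted state directory gets handed back to the prometheus user after the impermanence bind mounts appear. Generic sketch with a placeholder path and user:

    {
      systemd.tmpfiles.rules = [
        # Type  Path              Mode  User      Group     Age
        "Z /var/lib/example 0770 svc-user svc-user -"
      ];
    }
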
@@ -49,7 +49,7 @@ let
   ldapGroupFilter = config.mine.shared.lib.ldap.mkFilter (lconfig: llib:
     llib.mkAnd [
       (llib.mkOC lconfig.oc.groupOfUniqueNames)
-      (llib.mkOr [ "cn=${lconfig.groups.nextcloud_admin}" "cn=${lconfig.groups.member}" ])
+      (llib.mkOr [ "cn=${lconfig.groups.admin}" "cn=${lconfig.groups.member}" ])
     ]
   );
   ldapGroupFilterGroups = "admin;user";

@@ -86,7 +86,7 @@ let
   done

   # promote ldap admin group to admins
-  ${occ} ldap:promote-group ${config.mine.shared.settings.ldap.groups.nextcloud_admin} --yes -n
+  ${occ} ldap:promote-group ${config.mine.shared.settings.ldap.groups.admin} --yes -n
 '';

 # script for resetting nextcloud admin password on each startup

@@ -23,7 +23,7 @@ in {
   hash = npmDepsHash;
 };
 patches = [
-  (pkgs.writeText "uptime-kuma-database-writeable.patch" ''
+  (pkgs.writeText "authelia.patch" ''
     diff --git a/server/database.js b/server/database.js
     index 3374aff9..9e890d28 100644
     --- a/server/database.js

@@ -37,22 +37,6 @@ in {

     const Dialect = require("knex/lib/dialects/sqlite3/index.js");
   '')
-  # TODO(eyJhb): do we really want this?
-  (pkgs.writeText "uptime-kuma-disable-metrics-auth.patch" ''
-    diff --git a/server/server.js b/server/server.js
-    index db58ae82..d650a42a 100644
-    --- a/server/server.js
-    +++ b/server/server.js
-    @@ -292,7 +292,7 @@ let needSetup = false;
-
-        // Prometheus API metrics /metrics
-        // With Basic Auth using the first user's username/password
-    -    app.get("/metrics", apiAuth, prometheusAPIMetrics());
-    +    app.use("/metrics", prometheusAPIMetrics());
-
-        app.use("/", expressStaticGzip("dist", {
-            enableBrotli: true,
-  '')
 ];
 });
};

@@ -34,7 +34,6 @@
   nextcloud-admin-pass.file = ./nextcloud/admin-pass.age;
   nextcloud-secrets.file = ./nextcloud/secrets.age;
   nextcloud-smtp-pass.file = ./nextcloud/smtp-pass.age;
-  nextcloud-serverinfo-token.file = ./nextcloud/serverinfo-token.age;

   # stalwart
   stalwart-admin-fallback-password.file = ./stalwart/admin-fallback-password.age;

@@ -63,9 +62,6 @@
   # notify
   notify-ldap-pass.file = ./notify/ldap-pass.age;
   notify-env.file = ./notify/env.age;
-
-  # grafana
-  grafana-authelia-secret.file = ./grafana/authelia-secret.age;
 };

 users.groups.secrets-lldap-bind-user-pass = {};

@@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 QSDXqg mcA7aWulfqHTARfxzs9ECZaJRMZKLxZgl4uYXrsL6Tk
IOKrdtTiG/Wc8qQb5zip1F3B4BHAGkEw8hjz22UY80k
-> X25519 kqD2VC9Vw/2rrd/C1TR5He/78anx3UYXNbjs0vNXCz4
ZYenf1LK+YAlil/oiZIfGGyaK9S6pt8LLpCbmlaKn9s
-> ssh-ed25519 n8n9DQ PlW/1TA71RhclXIC2RlKUUOnqOq3qWy8yshqgM3Nu10
2j6c3UjFc/RJJrqeWIezHx53DcPHFPi5a8WXnyqkXhU
-> ssh-ed25519 BTp6UA n2idpPd9RFDbzvD2svo3A0NU7kx1nUEYzwFs0gpxn3Q
/4F5l1dXBvF0nWXvT8nxPPCAxB4heeUMSBrGMY3gfng
--- 7xw3+Ket2jYmH8wsoG2ivWUYLkyoR0et5FELrn+zzMo
(encrypted binary payload not shown)

Binary file not shown.

@@ -44,7 +44,6 @@ in
   "nextcloud/admin-pass.age".publicKeys = defaultAccess;
   "nextcloud/secrets.age".publicKeys = defaultAccess;
   "nextcloud/smtp-pass.age".publicKeys = defaultAccess;
-  "nextcloud/serverinfo-token.age".publicKeys = defaultAccess;

   # mailserver/stalwart
   "stalwart/admin-fallback-password.age".publicKeys = defaultAccess;

@@ -73,7 +72,4 @@ in
   # notify
   "notify/ldap-pass.age".publicKeys = defaultAccess;
   "notify/env.age".publicKeys = defaultAccess;
-
-  # grafana
-  "grafana/authelia-secret.age".publicKeys = defaultAccess;
 }

@@ -10,58 +10,47 @@ let
     -out "$out/ca.pem" -keyout "$out/ca.key"
   '';
 in {
-  # block all /metrics endpoints
-  options.services.nginx.virtualHosts = lib.mkOption {
-    type = lib.types.attrsOf (lib.types.submodule {
-      config.locations."/metrics" = lib.mkDefault {
-        extraConfig = "deny all;";
-      };
-    });
-  };
-
-  config = {
   services.nginx = {
     enable = true;

     recommendedOptimisation = true;
     recommendedTlsSettings = true;
     recommendedGzipSettings = true;
     # recommendedBrotliSettings = true;
     recommendedProxySettings = true;

     # only allow PFS-enabled ciphers with AES256
     sslCiphers = "AES256+EECDH:AES256+EDH:!aNULL";

     # disable access logs
     commonHttpConfig = ''
       access_log off;
     '';

     # setup a default site
     virtualHosts.default = {
       default = lib.mkDefault true;
       addSSL = true;

       sslCertificateKey = "${snakeOilCa}/ca.key";
       sslCertificate = "${snakeOilCa}/ca.pem";

       root = pkgs.writeTextDir "index.html" ''
         <html>
           <head>
             <title>Nothing to see</title>
           </head>
           <body>
             <p>Like I said, nothing to see here</p>
           </body>
         </html>
       '';
     };
   };

   networking.firewall = {
     allowedTCPPorts = [80 443];
     allowedUDPPorts = [443];
   };
-  };
 }

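The option block removed above is a pattern worth noting: re-declaring options.services.nginx.virtualHosts with an attrsOf submodule merges into the existing option declaration, so every virtual host, wherever it is defined, inherits a deny-all /metrics location unless it overrides the mkDefault. A standalone sketch of the pattern; the opt-out host name is hypothetical:

    { lib, ... }: {
      # Extend the virtualHosts submodule: every vhost now carries a
      # deny-all /metrics location by default (option declarations merge).
      options.services.nginx.virtualHosts = lib.mkOption {
        type = lib.types.attrsOf (lib.types.submodule {
          config.locations."/metrics" = lib.mkDefault {
            extraConfig = "deny all;";
          };
        });
      };

      # A single host can still opt out, since the default used mkDefault:
      config.services.nginx.virtualHosts."metrics.example.org".locations."/metrics" = {
        extraConfig = "allow 127.0.0.1; deny all;";
      };
    }
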
@@ -10,15 +10,9 @@ in {
     default = {};
   };

-  # config = {
-  #   mine.zfsMounts = let
-  #     zfsFilesystems = lib.filterAttrs (_: v: v.fsType == "zfs") config.fileSystems;
-  #   in lib.mapAttrs' (_: v: lib.nameValuePair v.device v.mountPoint) zfsFilesystems;
-  # };
-
-  # TODO: fix this better. We just do this, so we do not rely on fileSystems, otherwise we cannot
-  # use this with impermanence
   config = {
-    mine.zfsMounts = lib.mapAttrs' (n: v: lib.nameValuePair ("rpool/" + n) v.mountpoint) config.mine.disks.pools.rpool.datasets;
+    mine.zfsMounts = let
+      zfsFilesystems = lib.filterAttrs (_: v: v.fsType == "zfs") config.fileSystems;
+    in lib.mapAttrs' (_: v: lib.nameValuePair v.device v.mountPoint) zfsFilesystems;
   };
 }
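
The new mine.zfsMounts implementation above inverts config.fileSystems into a dataset-to-mountpoint map: lib.mapAttrs' rebuilds an attrset with new names and values via lib.nameValuePair. A self-contained sketch with mocked fileSystems entries (evaluable with nix-instantiate --eval; the <nixpkgs> import and mock data are assumptions):

    let
      lib = (import <nixpkgs> {}).lib;
      # Mock of config.fileSystems with the attributes the real option provides.
      fileSystems = {
        "/srv/prometheus" = { mountPoint = "/srv/prometheus"; device = "rpool/safe/svcs/prometheus"; fsType = "zfs"; };
        "/boot"           = { mountPoint = "/boot"; device = "/dev/sda1"; fsType = "vfat"; };
      };
      zfsFilesystems = lib.filterAttrs (_: v: v.fsType == "zfs") fileSystems;
    in
      # => { "rpool/safe/svcs/prometheus" = "/srv/prometheus"; }
      lib.mapAttrs' (_: v: lib.nameValuePair v.device v.mountPoint) zfsFilesystems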