servo: gate costly services behind sane.maxBuildCost option

2025-08-21 02:42:58 +00:00
parent 91578c0b78
commit e700ff392f
4 changed files with 502 additions and 494 deletions
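
Each gated service moves its entire module body under `config = lib.mkIf (config.sane.maxBuildCost >= N) { ... }`, so hosts that declare a smaller build budget drop the service from their closure entirely. The declaration of `sane.maxBuildCost` itself is not part of this diff; a minimal sketch of what it plausibly looks like, assuming an integer option (the default shown is a placeholder):

{ lib, ... }:
{
  options.sane.maxBuildCost = lib.mkOption {
    type = lib.types.int;
    default = 3;  # placeholder: the real default is declared outside this diff
    description = ''
      rough upper bound on how costly a service may be to build.
      modules gate expensive config behind
      `lib.mkIf (config.sane.maxBuildCost >= N)`.
    '';
  };
}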


@@ -14,158 +14,160 @@
#
# N.B.: default install DOES NOT SUPPORT DLNA out of the box.
# one must install it as a "plugin", which can be done through the UI.
{ ... }:
{ config, lib, ... }:
# lib.mkIf false #< XXX(2024-11-17): disabled because it hasn't been working for months; web UI hangs on load, TVs see no files
{
# https://jellyfin.org/docs/general/networking/index.html
sane.ports.ports."1900" = {
protocol = [ "udp" ];
visibleTo.lan = true;
description = "colin-upnp-for-jellyfin";
};
sane.ports.ports."7359" = {
protocol = [ "udp" ];
visibleTo.lan = true;
description = "colin-jellyfin-specific-client-discovery";
# ^ not sure if this is necessary: copied this port from nixos jellyfin.openFirewall
};
# not sure if 8096/8920 get used either:
sane.ports.ports."8096" = {
protocol = [ "tcp" ];
visibleTo.lan = true;
description = "colin-jellyfin-http-lan";
};
sane.ports.ports."8920" = {
protocol = [ "tcp" ];
visibleTo.lan = true;
description = "colin-jellyfin-https-lan";
};
sane.persist.sys.byStore.plaintext = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/data"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/metadata"; method = "bind"; }
# TODO: ship plugins statically, via nix. that'll be less fragile
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/plugins/DLNA_5.0.0.0"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/root"; method = "bind"; }
];
sane.persist.sys.byStore.ephemeral = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/log"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/transcodes"; method = "bind"; }
];
services.jellyfin.enable = true;
users.users.jellyfin.extraGroups = [ "media" ];
sane.fs."/var/lib/jellyfin".dir.acl = {
user = "jellyfin";
group = "jellyfin";
mode = "0700";
};
# `"Jellyfin.Plugin.Dlna": "Debug"` logging: <https://jellyfin.org/docs/general/networking/dlna>
# TODO: switch Dlna back to 'Information' once satisfied with stability
sane.fs."/var/lib/jellyfin/config/logging.json".symlink.text = ''
{
"Serilog": {
"MinimumLevel": {
"Default": "Information",
"Override": {
"Microsoft": "Warning",
"System": "Warning",
"Jellyfin.Plugin.Dlna": "Debug"
}
},
"WriteTo": [
{
"Name": "Console",
"Args": {
"outputTemplate": "[{Timestamp:HH:mm:ss}] [{Level:u3}] [{ThreadId}] {SourceContext}: {Message:lj}{NewLine}{Exception}"
}
}
],
"Enrich": [ "FromLogContext", "WithThreadId" ]
}
}
'';
sane.fs."/var/lib/jellyfin/config/network.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<NetworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<BaseUrl />
<EnableHttps>false</EnableHttps>
<RequireHttps>false</RequireHttps>
<InternalHttpPort>8096</InternalHttpPort>
<InternalHttpsPort>8920</InternalHttpsPort>
<PublicHttpPort>8096</PublicHttpPort>
<PublicHttpsPort>8920</PublicHttpsPort>
<AutoDiscovery>true</AutoDiscovery>
<EnableUPnP>false</EnableUPnP>
<EnableIPv4>true</EnableIPv4>
<EnableIPv6>false</EnableIPv6>
<EnableRemoteAccess>true</EnableRemoteAccess>
<LocalNetworkSubnets>
<string>10.78.76.0/22</string>
</LocalNetworkSubnets>
<KnownProxies>
<string>127.0.0.1</string>
<string>localhost</string>
<string>10.78.79.1</string>
</KnownProxies>
<IgnoreVirtualInterfaces>false</IgnoreVirtualInterfaces>
<VirtualInterfaceNames />
<EnablePublishedServerUriByRequest>false</EnablePublishedServerUriByRequest>
<PublishedServerUriBySubnet />
<RemoteIPFilter />
<IsRemoteIPFilterBlacklist>false</IsRemoteIPFilterBlacklist>
</NetworkConfiguration>
'';
# guest user id is `5ad194d60dca41de84b332950ffc4308`
sane.fs."/var/lib/jellyfin/plugins/configurations/Jellyfin.Plugin.Dlna.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<DlnaPluginConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<EnablePlayTo>true</EnablePlayTo>
<ClientDiscoveryIntervalSeconds>60</ClientDiscoveryIntervalSeconds>
<BlastAliveMessages>true</BlastAliveMessages>
<AliveMessageIntervalSeconds>180</AliveMessageIntervalSeconds>
<SendOnlyMatchedHost>true</SendOnlyMatchedHost>
<DefaultUserId>5ad194d6-0dca-41de-84b3-32950ffc4308</DefaultUserId>
</DlnaPluginConfiguration>
'';
# fix LG TV to play more files.
# there are certain files for which it supports only Direct Play (not even "Direct Stream", only "Direct Play").
# this isn't a 100% fix: patching the profile allows e.g. Azumanga Daioh to play,
# but A Place Further Than the Universe still fails as before.
#
# profile is based on upstream: <https://github.com/jellyfin/jellyfin-plugin-dlna>
sane.fs."/var/lib/jellyfin/plugins/DLNA_5.0.0.0/profiles/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
# XXX(2024-11-17): old method, but the referenced file seems to go unused, and setting only it causes failures:
# > [DBG] Jellyfin.Plugin.Dlna.ContentDirectory.ContentDirectoryService: Not eligible for DirectPlay due to unsupported subtitles
# sane.fs."/var/lib/jellyfin/plugins/configurations/dlna/user/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
systemd.services.jellyfin.unitConfig.RequiresMountsFor = [
"/var/media"
];
# Jellyfin multimedia server
# this is mostly taken from the official jellyfin.org docs
services.nginx.virtualHosts."jelly.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8096";
proxyWebsockets = true;
recommendedProxySettings = true;
# extraConfig = ''
# # Disable buffering when the nginx proxy gets very resource heavy upon streaming
# proxy_buffering off;
# '';
config = lib.mkIf (config.sane.maxBuildCost >= 2) {
# https://jellyfin.org/docs/general/networking/index.html
sane.ports.ports."1900" = {
protocol = [ "udp" ];
visibleTo.lan = true;
description = "colin-upnp-for-jellyfin";
};
sane.ports.ports."7359" = {
protocol = [ "udp" ];
visibleTo.lan = true;
description = "colin-jellyfin-specific-client-discovery";
# ^ not sure if this is necessary: copied this port from nixos jellyfin.openFirewall
};
# not sure if 8096/8920 get used either:
sane.ports.ports."8096" = {
protocol = [ "tcp" ];
visibleTo.lan = true;
description = "colin-jellyfin-http-lan";
};
sane.ports.ports."8920" = {
protocol = [ "tcp" ];
visibleTo.lan = true;
description = "colin-jellyfin-https-lan";
};
};
sane.dns.zones."uninsane.org".inet.CNAME."jelly" = "native";
sane.persist.sys.byStore.plaintext = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/data"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/metadata"; method = "bind"; }
# TODO: ship plugins statically, via nix. that'll be less fragile
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/plugins/DLNA_5.0.0.0"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/root"; method = "bind"; }
];
sane.persist.sys.byStore.ephemeral = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/log"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/transcodes"; method = "bind"; }
];
services.jellyfin.enable = true;
users.users.jellyfin.extraGroups = [ "media" ];
sane.fs."/var/lib/jellyfin".dir.acl = {
user = "jellyfin";
group = "jellyfin";
mode = "0700";
};
# `"Jellyfin.Plugin.Dlna": "Debug"` logging: <https://jellyfin.org/docs/general/networking/dlna>
# TODO: switch Dlna back to 'Information' once satisfied with stability
sane.fs."/var/lib/jellyfin/config/logging.json".symlink.text = ''
{
"Serilog": {
"MinimumLevel": {
"Default": "Information",
"Override": {
"Microsoft": "Warning",
"System": "Warning",
"Jellyfin.Plugin.Dlna": "Debug"
}
},
"WriteTo": [
{
"Name": "Console",
"Args": {
"outputTemplate": "[{Timestamp:HH:mm:ss}] [{Level:u3}] [{ThreadId}] {SourceContext}: {Message:lj}{NewLine}{Exception}"
}
}
],
"Enrich": [ "FromLogContext", "WithThreadId" ]
}
}
'';
sane.fs."/var/lib/jellyfin/config/network.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<NetworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<BaseUrl />
<EnableHttps>false</EnableHttps>
<RequireHttps>false</RequireHttps>
<InternalHttpPort>8096</InternalHttpPort>
<InternalHttpsPort>8920</InternalHttpsPort>
<PublicHttpPort>8096</PublicHttpPort>
<PublicHttpsPort>8920</PublicHttpsPort>
<AutoDiscovery>true</AutoDiscovery>
<EnableUPnP>false</EnableUPnP>
<EnableIPv4>true</EnableIPv4>
<EnableIPv6>false</EnableIPv6>
<EnableRemoteAccess>true</EnableRemoteAccess>
<LocalNetworkSubnets>
<string>10.78.76.0/22</string>
</LocalNetworkSubnets>
<KnownProxies>
<string>127.0.0.1</string>
<string>localhost</string>
<string>10.78.79.1</string>
</KnownProxies>
<IgnoreVirtualInterfaces>false</IgnoreVirtualInterfaces>
<VirtualInterfaceNames />
<EnablePublishedServerUriByRequest>false</EnablePublishedServerUriByRequest>
<PublishedServerUriBySubnet />
<RemoteIPFilter />
<IsRemoteIPFilterBlacklist>false</IsRemoteIPFilterBlacklist>
</NetworkConfiguration>
'';
# guest user id is `5ad194d60dca41de84b332950ffc4308`
sane.fs."/var/lib/jellyfin/plugins/configurations/Jellyfin.Plugin.Dlna.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<DlnaPluginConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<EnablePlayTo>true</EnablePlayTo>
<ClientDiscoveryIntervalSeconds>60</ClientDiscoveryIntervalSeconds>
<BlastAliveMessages>true</BlastAliveMessages>
<AliveMessageIntervalSeconds>180</AliveMessageIntervalSeconds>
<SendOnlyMatchedHost>true</SendOnlyMatchedHost>
<DefaultUserId>5ad194d6-0dca-41de-84b3-32950ffc4308</DefaultUserId>
</DlnaPluginConfiguration>
'';
# fix LG TV to play more files.
# there are certain files for which it supports only Direct Play (not even "Direct Stream", only "Direct Play").
# this isn't a 100% fix: patching the profile allows e.g. Azumanga Daioh to play,
# but A Place Further Than the Universe still fails as before.
#
# profile is based on upstream: <https://github.com/jellyfin/jellyfin-plugin-dlna>
sane.fs."/var/lib/jellyfin/plugins/DLNA_5.0.0.0/profiles/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
# XXX(2024-11-17): old method, but the referenced file seems to go unused, and setting only it causes failures:
# > [DBG] Jellyfin.Plugin.Dlna.ContentDirectory.ContentDirectoryService: Not eligible for DirectPlay due to unsupported subtitles
# sane.fs."/var/lib/jellyfin/plugins/configurations/dlna/user/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
systemd.services.jellyfin.unitConfig.RequiresMountsFor = [
"/var/media"
];
# Jellyfin multimedia server
# this is mostly taken from the official jellyfin.org docs
services.nginx.virtualHosts."jelly.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8096";
proxyWebsockets = true;
recommendedProxySettings = true;
# extraConfig = ''
# # Disable buffering when the nginx proxy gets very resource heavy upon streaming
# proxy_buffering off;
# '';
};
};
sane.dns.zones."uninsane.org".inet.CNAME."jelly" = "native";
};
}
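
Note the shape of this change: the old module used the `{ ... }:` shorthand, where the returned attrset is implicitly all `config`; making it conditional requires the explicit `config = ...` form, since the module system only merges `mkIf` values inside `config` (the long-commented top-level `lib.mkIf false` near the top of the old file hints at that limitation). A minimal sketch of the before/after shape, trimmed to a single setting:

# before: shorthand module, unconditional
{ ... }:
{
  services.jellyfin.enable = true;
}

# after: explicit config attribute, gated on build cost
{ config, lib, ... }:
{
  config = lib.mkIf (config.sane.maxBuildCost >= 2) {
    services.jellyfin.enable = true;
  };
}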


@@ -1,40 +1,42 @@
{ pkgs, ... }:
{ config, lib, pkgs, ... }:
{
sane.services.kiwix-serve = {
enable = true;
port = 8013;
zimPaths = with pkgs.zimPackages; [
alpinelinux_en_all_maxi.zimPath
archlinux_en_all_maxi.zimPath
bitcoin_en_all_maxi.zimPath
devdocs_en_nix.zimPath
gentoo_en_all_maxi.zimPath
# khanacademy_en_all.zimPath #< TODO: enable
openstreetmap-wiki_en_all_maxi.zimPath
psychonautwiki_en_all_maxi.zimPath
rationalwiki_en_all_maxi.zimPath
# wikipedia_en_100.zimPath
wikipedia_en_all_maxi.zimPath
# wikipedia_en_all_mini.zimPath
zimgit-food-preparation_en.zimPath
zimgit-medicine_en.zimPath
zimgit-post-disaster_en.zimPath
zimgit-water_en.zimPath
];
};
services.nginx.virtualHosts."w.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8013";
recommendedProxySettings = true;
config = lib.mkIf (config.sane.maxBuildCost >= 3) {
sane.services.kiwix-serve = {
enable = true;
port = 8013;
zimPaths = with pkgs.zimPackages; [
alpinelinux_en_all_maxi.zimPath
archlinux_en_all_maxi.zimPath
bitcoin_en_all_maxi.zimPath
devdocs_en_nix.zimPath
gentoo_en_all_maxi.zimPath
# khanacademy_en_all.zimPath #< TODO: enable
openstreetmap-wiki_en_all_maxi.zimPath
psychonautwiki_en_all_maxi.zimPath
rationalwiki_en_all_maxi.zimPath
# wikipedia_en_100.zimPath
wikipedia_en_all_maxi.zimPath
# wikipedia_en_all_mini.zimPath
zimgit-food-preparation_en.zimPath
zimgit-medicine_en.zimPath
zimgit-post-disaster_en.zimPath
zimgit-water_en.zimPath
];
};
locations."= /robots.txt".extraConfig = ''
return 200 "User-agent: *\nDisallow: /\n";
'';
};
sane.dns.zones."uninsane.org".inet.CNAME."w" = "native";
services.nginx.virtualHosts."w.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8013";
recommendedProxySettings = true;
};
locations."= /robots.txt".extraConfig = ''
return 200 "User-agent: *\nDisallow: /\n";
'';
};
sane.dns.zones."uninsane.org".inet.CNAME."w" = "native";
};
}


@@ -3,7 +3,7 @@
# - <repo:LemmyNet/lemmy:docker/nginx.conf>
# - <repo:LemmyNet/lemmy-ansible:templates/nginx.conf>
{ lib, pkgs, ... }:
{ config, lib, pkgs, ... }:
let
uiPort = 1234; # default ui port is 1234
backendPort = 8536; # default backend port is 8536
@@ -24,154 +24,156 @@ let
media.video.max_frame_count = 30 * 60 * 60;
};
in {
services.lemmy = {
enable = true;
settings.hostname = "lemmy.uninsane.org";
# federation.debug forces outbound federation queries to be run synchronously
# N.B.: this option might not be read for 0.17.0+? <https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions>
# settings.federation.debug = true;
settings.port = backendPort;
ui.port = uiPort;
database.createLocally = true;
nginx.enable = true;
};
config = lib.mkIf (config.sane.maxBuildCost >= 2) {
services.lemmy = {
enable = true;
settings.hostname = "lemmy.uninsane.org";
# federation.debug forces outbound federation queries to be run synchronously
# N.B.: this option might not be read for 0.17.0+? <https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions>
# settings.federation.debug = true;
settings.port = backendPort;
ui.port = uiPort;
database.createLocally = true;
nginx.enable = true;
};
systemd.services.lemmy.environment = {
RUST_BACKTRACE = "full";
RUST_LOG = "error";
# RUST_LOG = "warn";
# RUST_LOG = "debug";
# RUST_LOG = "trace";
# upstream defaults LEMMY_DATABASE_URL = "postgres:///lemmy?host=/run/postgresql";
# - Postgres complains that we didn't specify a user
# lemmy formats the url as:
# - postgres://{user}:{password}@{host}:{port}/{database}
# SO suggests (https://stackoverflow.com/questions/3582552/what-is-the-format-for-the-postgresql-connection-string-url):
# - postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
# LEMMY_DATABASE_URL = "postgres://lemmy@/run/postgresql"; # connection to server on socket "/run/postgresql/.s.PGSQL.5432" failed: FATAL: database "run/postgresql" does not exist
# LEMMY_DATABASE_URL = "postgres://lemmy?host=/run/postgresql"; # no PostgreSQL user name specified in startup packet
# LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@?host=/run/postgresql"; # WORKS
LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@/lemmy?host=/run/postgresql";
};
users.groups.lemmy = {};
users.users.lemmy = {
group = "lemmy";
isSystemUser = true;
};
systemd.services.lemmy.environment = {
RUST_BACKTRACE = "full";
RUST_LOG = "error";
# RUST_LOG = "warn";
# RUST_LOG = "debug";
# RUST_LOG = "trace";
# upstream defaults LEMMY_DATABASE_URL = "postgres:///lemmy?host=/run/postgresql";
# - Postgres complains that we didn't specify a user
# lemmy formats the url as:
# - postgres://{user}:{password}@{host}:{port}/{database}
# SO suggests (https://stackoverflow.com/questions/3582552/what-is-the-format-for-the-postgresql-connection-string-url):
# - postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
# LEMMY_DATABASE_URL = "postgres://lemmy@/run/postgresql"; # connection to server on socket "/run/postgresql/.s.PGSQL.5432" failed: FATAL: database "run/postgresql" does not exist
# LEMMY_DATABASE_URL = "postgres://lemmy?host=/run/postgresql"; # no PostgreSQL user name specified in startup packet
# LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@?host=/run/postgresql"; # WORKS
LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@/lemmy?host=/run/postgresql";
};
users.groups.lemmy = {};
users.users.lemmy = {
group = "lemmy";
isSystemUser = true;
};
services.nginx.virtualHosts."lemmy.uninsane.org" = {
forceSSL = true;
enableACME = true;
};
services.nginx.virtualHosts."lemmy.uninsane.org" = {
forceSSL = true;
enableACME = true;
};
sane.dns.zones."uninsane.org".inet.CNAME."lemmy" = "native";
sane.dns.zones."uninsane.org".inet.CNAME."lemmy" = "native";
systemd.services.lemmy = {
# fix to use a normal user so we can configure perms correctly
# XXX(2024-07-28): this hasn't been rigorously tested:
# possible that i've set something too strict and won't notice right away
serviceConfig.DynamicUser = lib.mkForce false;
serviceConfig.User = "lemmy";
serviceConfig.Group = "lemmy";
systemd.services.lemmy = {
# fix to use a normal user so we can configure perms correctly
# XXX(2024-07-28): this hasn't been rigorously tested:
# possible that i've set something too strict and won't notice right away
serviceConfig.DynamicUser = lib.mkForce false;
serviceConfig.User = "lemmy";
serviceConfig.Group = "lemmy";
# switch postgres from Requires -> Wants, so that postgres may restart without taking lemmy down with it.
requires = lib.mkForce [];
wants = [ "postgresql.service" ];
# switch postgres from Requires -> Wants, so that postgres may restart without taking lemmy down with it.
requires = lib.mkForce [];
wants = [ "postgresql.service" ];
# hardening (systemd-analyze security lemmy)
# a handful of these are specified in upstream nixpkgs, but mostly not
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid";
# hardening (systemd-analyze security lemmy)
# a handful of these are specified in upstream nixpkgs, but mostly not
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid";
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" ];
};
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" ];
};
systemd.services.lemmy-ui = {
# hardening (systemd-analyze security lemmy-ui)
# TODO: upstream into nixpkgs
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
# serviceConfig.MemoryDenyWriteExecute = true; #< it uses v8, JIT
serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid";
systemd.services.lemmy-ui = {
# hardening (systemd-analyze security lemmy-ui)
# TODO: upstream into nixpkgs
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
# serviceConfig.MemoryDenyWriteExecute = true; #< it uses v8, JIT
serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid";
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" "@pkey" "@sandbox" ];
};
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" "@pkey" "@sandbox" ];
};
#v DO NOT REMOVE: the module defaults to pict-rs 0.3 instead of the latest, so this must always be set explicitly.
services.pict-rs.package = pict-rs;
#v DO NOT REMOVE: the module defaults to pict-rs 0.3 instead of the latest, so this must always be set explicitly.
services.pict-rs.package = pict-rs;
systemd.services.pict-rs = {
serviceConfig.ExecStart = lib.mkForce (lib.concatStringsSep " " [
(lib.getExe pict-rs)
"--config-file"
tomlConfig
"run"
]);
systemd.services.pict-rs = {
serviceConfig.ExecStart = lib.mkForce (lib.concatStringsSep " " [
(lib.getExe pict-rs)
"--config-file"
tomlConfig
"run"
]);
# hardening (systemd-analyze security pict-rs)
# TODO: upstream into nixpkgs
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid";
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" ];
# hardening (systemd-analyze security pict-rs)
# TODO: upstream into nixpkgs
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid";
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" ];
};
};
}


@@ -14,207 +14,209 @@ let
# logLevel = "debug";
in
{
sane.persist.sys.byStore.private = [
# contains media i've uploaded to the server
{ user = "pleroma"; group = "pleroma"; path = "/var/lib/pleroma"; method = "bind"; }
];
services.pleroma.enable = true;
services.pleroma.secretConfigFile = config.sops.secrets.pleroma_secrets.path;
services.pleroma.configs = [
''
import Config
config = lib.mkIf (config.sane.maxBuildCost >= 2) {
sane.persist.sys.byStore.private = [
# contains media i've uploaded to the server
{ user = "pleroma"; group = "pleroma"; path = "/var/lib/pleroma"; method = "bind"; }
];
services.pleroma.enable = true;
services.pleroma.secretConfigFile = config.sops.secrets.pleroma_secrets.path;
services.pleroma.configs = [
''
import Config
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "fed.uninsane.org", scheme: "https", port: 443],
http: [ip: {127, 0, 0, 1}, port: 4040]
# secret_key_base: "{secrets.pleroma.secret_key_base}",
# signing_salt: "{secrets.pleroma.signing_salt}"
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "fed.uninsane.org", scheme: "https", port: 443],
http: [ip: {127, 0, 0, 1}, port: 4040]
# secret_key_base: "{secrets.pleroma.secret_key_base}",
# signing_salt: "{secrets.pleroma.signing_salt}"
config :pleroma, :instance,
name: "Perfectly Sane",
description: "Single-user Pleroma instance",
email: "admin.pleroma@uninsane.org",
notify_email: "notify.pleroma@uninsane.org",
limit: 5000,
registrations_open: true,
account_approval_required: true,
max_pinned_statuses: 5,
external_user_synchronization: true
config :pleroma, :instance,
name: "Perfectly Sane",
description: "Single-user Pleroma instance",
email: "admin.pleroma@uninsane.org",
notify_email: "notify.pleroma@uninsane.org",
limit: 5000,
registrations_open: true,
account_approval_required: true,
max_pinned_statuses: 5,
external_user_synchronization: true
# docs: https://hexdocs.pm/swoosh/Swoosh.Adapters.Sendmail.html
# test mail config with sudo -u pleroma ./bin/pleroma_ctl email test --to someone@somewhere.net
config :pleroma, Pleroma.Emails.Mailer,
enabled: true,
adapter: Swoosh.Adapters.Sendmail,
cmd_path: "${lib.getExe' pkgs.postfix "sendmail"}"
# docs: https://hexdocs.pm/swoosh/Swoosh.Adapters.Sendmail.html
# test mail config with sudo -u pleroma ./bin/pleroma_ctl email test --to someone@somewhere.net
config :pleroma, Pleroma.Emails.Mailer,
enabled: true,
adapter: Swoosh.Adapters.Sendmail,
cmd_path: "${lib.getExe' pkgs.postfix "sendmail"}"
config :pleroma, Pleroma.User,
restricted_nicknames: [ "admin", "uninsane", "root" ]
config :pleroma, Pleroma.User,
restricted_nicknames: [ "admin", "uninsane", "root" ]
config :pleroma, :media_proxy,
enabled: false,
redirect_on_failure: true
#base_url: "https://cache.pleroma.social"
config :pleroma, :media_proxy,
enabled: false,
redirect_on_failure: true
#base_url: "https://cache.pleroma.social"
# see for reference:
# - `force_custom_plan`: <https://docs.pleroma.social/backend/configuration/postgresql/#disable-generic-query-plans>
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: "pleroma",
database: "pleroma",
hostname: "localhost",
pool_size: 10,
prepare: :named,
parameters: [
plan_cache_mode: "force_custom_plan"
]
# XXX: prepare: :named is needed only for PG <= 12
# prepare: :named,
# password: "{secrets.pleroma.db_password}",
# see for reference:
# - `force_custom_plan`: <https://docs.pleroma.social/backend/configuration/postgresql/#disable-generic-query-plans>
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: "pleroma",
database: "pleroma",
hostname: "localhost",
pool_size: 10,
prepare: :named,
parameters: [
plan_cache_mode: "force_custom_plan"
]
# XXX: prepare: :named is needed only for PG <= 12
# prepare: :named,
# password: "{secrets.pleroma.db_password}",
# Configure web push notifications
config :web_push_encryption, :vapid_details,
subject: "mailto:notify.pleroma@uninsane.org"
# public_key: "{secrets.pleroma.vapid_public_key}",
# private_key: "{secrets.pleroma.vapid_private_key}"
# Configure web push notifications
config :web_push_encryption, :vapid_details,
subject: "mailto:notify.pleroma@uninsane.org"
# public_key: "{secrets.pleroma.vapid_public_key}",
# private_key: "{secrets.pleroma.vapid_private_key}"
# config :joken, default_signer: "{secrets.pleroma.joken_default_signer}"
# config :joken, default_signer: "{secrets.pleroma.joken_default_signer}"
config :pleroma, :database, rum_enabled: false
config :pleroma, :instance, static_dir: "/var/lib/pleroma/instance/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"
config :pleroma, configurable_from_database: false
config :pleroma, :database, rum_enabled: false
config :pleroma, :instance, static_dir: "/var/lib/pleroma/instance/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"
config :pleroma, configurable_from_database: false
# strip metadata from uploaded images
config :pleroma, Pleroma.Upload, filters: [Pleroma.Upload.Filter.Exiftool.StripLocation]
# strip metadata from uploaded images
config :pleroma, Pleroma.Upload, filters: [Pleroma.Upload.Filter.Exiftool.StripLocation]
# fix log spam: <https://git.pleroma.social/pleroma/pleroma/-/issues/1659>
# specifically, remove LAN addresses from `reserved`
config :pleroma, Pleroma.Web.Plugs.RemoteIp,
enabled: true,
reserved: ["127.0.0.0/8", "::1/128", "fc00::/7", "172.16.0.0/12"]
# fix log spam: <https://git.pleroma.social/pleroma/pleroma/-/issues/1659>
# specifically, remove LAN addresses from `reserved`
config :pleroma, Pleroma.Web.Plugs.RemoteIp,
enabled: true,
reserved: ["127.0.0.0/8", "::1/128", "fc00::/7", "172.16.0.0/12"]
# TODO: GET /api/pleroma/captcha is broken
# there was a nixpkgs PR to fix this around 2022/10 though.
config :pleroma, Pleroma.Captcha,
enabled: false,
method: Pleroma.Captcha.Native
# TODO: GET /api/pleroma/captcha is broken
# there was a nixpkgs PR to fix this around 2022/10 though.
config :pleroma, Pleroma.Captcha,
enabled: false,
method: Pleroma.Captcha.Native
# (enabled by colin)
# Enable Strict-Transport-Security once SSL is working:
config :pleroma, :http_security,
sts: true
# (enabled by colin)
# Enable Strict-Transport-Security once SSL is working:
config :pleroma, :http_security,
sts: true
# docs: https://docs.pleroma.social/backend/configuration/cheatsheet/#logger
config :logger,
backends: [{ExSyslogger, :ex_syslogger}]
# docs: https://docs.pleroma.social/backend/configuration/cheatsheet/#logger
config :logger,
backends: [{ExSyslogger, :ex_syslogger}]
config :logger, :ex_syslogger,
level: :${logLevel}
config :logger, :ex_syslogger,
level: :${logLevel}
# policies => list of message rewriting facilities to be enabled
# transparency => whether to publish these rules in node_info (and /about)
config :pleroma, :mrf,
policies: [Pleroma.Web.ActivityPub.MRF.SimplePolicy],
transparency: true
# policies => list of message rewriting facilities to be enabled
# transparency => whether to publish these rules in node_info (and /about)
config :pleroma, :mrf,
policies: [Pleroma.Web.ActivityPub.MRF.SimplePolicy],
transparency: true
# reject => { host, reason }
config :pleroma, :mrf_simple,
reject: [ {"threads.net", "megacorp"}, {"*.threads.net", "megacorp"} ]
# reject: [ [host: "threads.net", reason: "megacorp"], [host: "*.threads.net", reason: "megacorp"] ]
# reject => { host, reason }
config :pleroma, :mrf_simple,
reject: [ {"threads.net", "megacorp"}, {"*.threads.net", "megacorp"} ]
# reject: [ [host: "threads.net", reason: "megacorp"], [host: "*.threads.net", reason: "megacorp"] ]
# XXX colin: not sure if this actually _does_ anything
# better to steal emoji from other instances?
# - <https://docs.pleroma.social/backend/configuration/cheatsheet/#mrf_steal_emoji>
config :pleroma, :emoji,
shortcode_globs: ["/emoji/**/*.png"],
groups: [
"Cirno": "/emoji/cirno/*.png",
"Kirby": "/emoji/kirby/*.png",
"Bun": "/emoji/bun/*.png",
"Yuru Camp": "/emoji/yuru_camp/*.png",
]
''
];
# XXX colin: not sure if this actually _does_ anything
# better to steal emoji from other instances?
# - <https://docs.pleroma.social/backend/configuration/cheatsheet/#mrf_steal_emoji>
config :pleroma, :emoji,
shortcode_globs: ["/emoji/**/*.png"],
groups: [
"Cirno": "/emoji/cirno/*.png",
"Kirby": "/emoji/kirby/*.png",
"Bun": "/emoji/bun/*.png",
"Yuru Camp": "/emoji/yuru_camp/*.png",
]
''
];
systemd.services.pleroma.path = [
# something inside pleroma invokes `sh` w/o specifying it by path, so this is needed to allow pleroma to start
pkgs.bash
# used by Pleroma to strip geo tags from uploads
pkgs.exiftool
# config.sane.programs.exiftool.package #< XXX(2024-10-20): breaks image uploading
# i saw some errors when pleroma was shutting down about it not being able to find `awk`. probably not critical
# config.sane.programs.gawk.package
# needed for email operations like password reset
pkgs.postfix
];
systemd.services.pleroma.path = [
# something inside pleroma invokes `sh` w/o specifying it by path, so this is needed to allow pleroma to start
pkgs.bash
# used by Pleroma to strip geo tags from uploads
pkgs.exiftool
# config.sane.programs.exiftool.package #< XXX(2024-10-20): breaks image uploading
# i saw some errors when pleroma was shutting down about it not being able to find `awk`. probably not critical
# config.sane.programs.gawk.package
# needed for email operations like password reset
pkgs.postfix
];
systemd.services.pleroma = {
# postgres can be slow to service early requests, preventing pleroma from starting on the first try
serviceConfig.Restart = "on-failure";
serviceConfig.RestartSec = "10s";
systemd.services.pleroma = {
# postgres can be slow to service early requests, preventing pleroma from starting on the first try
serviceConfig.Restart = "on-failure";
serviceConfig.RestartSec = "10s";
# hardening (systemd-analyze security pleroma)
# XXX(2024-07-28): this hasn't been rigorously tested:
# possible that i've set something too strict and won't notice right away
# make sure to test:
# - image/media uploading
serviceConfig.CapabilityBoundingSet = lib.mkForce [ "" "" ]; # nixos default is `~CAP_SYS_ADMIN`
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = lib.mkForce true; #< dunno why nixpkgs has this set false; it seems to work as true
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
# hardening (systemd-analyze security pleroma)
# XXX(2024-07-28): this hasn't been rigorously tested:
# possible that i've set something too strict and won't notice right away
# make sure to test:
# - image/media uploading
serviceConfig.CapabilityBoundingSet = lib.mkForce [ "" "" ]; # nixos default is `~CAP_SYS_ADMIN`
serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = lib.mkForce true; #< dunno why nixpkgs has this set false; it seems to work as true
serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true;
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProcSubset = "all"; #< needs /proc/sys/kernel/overflowuid for bwrap
serviceConfig.ProtectProc = "invisible";
serviceConfig.ProcSubset = "all"; #< needs /proc/sys/kernel/overflowuid for bwrap
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectSystem = lib.mkForce "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectSystem = lib.mkForce "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" "@mount" "@sandbox" ]; #< "sandbox" might not actually be necessary
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" "@mount" "@sandbox" ]; #< "sandbox" might not actually be necessary
serviceConfig.ProtectHostname = false; #< else bwrap can't mount /proc
serviceConfig.ProtectKernelLogs = false; #< else breaks exiftool ("bwrap: Can't mount proc on /newroot/proc: Operation not permitted")
serviceConfig.ProtectKernelTunables = false; #< else breaks exiftool
serviceConfig.RestrictNamespaces = false; # media uploads require bwrap
};
serviceConfig.ProtectHostname = false; #< else bwrap can't mount /proc
serviceConfig.ProtectKernelLogs = false; #< else breaks exiftool ("bwrap: Can't mount proc on /newroot/proc: Operation not permitted")
serviceConfig.ProtectKernelTunables = false; #< else breaks exiftool
serviceConfig.RestrictNamespaces = false; # media uploads require bwrap
};
# this is required to allow pleroma to send email.
# raw `sendmail` works, but i think pleroma's passing it some funny flags or something, idk.
# hack to fix that.
users.users.pleroma.extraGroups = [ "postdrop" ];
# this is required to allow pleroma to send email.
# raw `sendmail` works, but i think pleroma's passing it some funny flags or something, idk.
# hack to fix that.
users.users.pleroma.extraGroups = [ "postdrop" ];
# Pleroma server and web interface
# TODO: enable publog?
services.nginx.virtualHosts."fed.uninsane.org" = {
forceSSL = true; # pleroma redirects to https anyway
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:4040";
recommendedProxySettings = true;
# documented: https://git.pleroma.social/pleroma/pleroma/-/blob/develop/installation/pleroma.nginx
extraConfig = ''
# client_max_body_size defines the maximum upload size
client_max_body_size 16m;
'';
# Pleroma server and web interface
# TODO: enable publog?
services.nginx.virtualHosts."fed.uninsane.org" = {
forceSSL = true; # pleroma redirects to https anyway
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:4040";
recommendedProxySettings = true;
# documented: https://git.pleroma.social/pleroma/pleroma/-/blob/develop/installation/pleroma.nginx
extraConfig = ''
# client_max_body_size defines the maximum upload size
client_max_body_size 16m;
'';
};
};
sane.dns.zones."uninsane.org".inet.CNAME."fed" = "native";
sops.secrets."pleroma_secrets" = {
owner = config.users.users.pleroma.name;
};
};
sane.dns.zones."uninsane.org".inet.CNAME."fed" = "native";
sops.secrets."pleroma_secrets" = {
owner = config.users.users.pleroma.name;
};
}
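
For reference, the thresholds this commit assigns, read off the hunks above:

  jellyfin (jelly.uninsane.org):  sane.maxBuildCost >= 2
  kiwix-serve (w.uninsane.org):   sane.maxBuildCost >= 3
  lemmy (lemmy.uninsane.org):     sane.maxBuildCost >= 2
  pleroma (fed.uninsane.org):     sane.maxBuildCost >= 2

kiwix-serve presumably lands in the costliest tier because of the bundled ZIM archives (e.g. wikipedia_en_all_maxi).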