servo: gate costly services behind sane.maxBuildCost option

This commit is contained in:
2025-08-21 02:42:58 +00:00
parent 91578c0b78
commit e700ff392f
4 changed files with 502 additions and 494 deletions

View File

@@ -14,158 +14,160 @@
# #
# N.B.: default install DOES NOT SUPPORT DLNA out of the box. # N.B.: default install DOES NOT SUPPORT DLNA out of the box.
# one must install it as a "plugin", which can be done through the UI. # one must install it as a "plugin", which can be done through the UI.
{ ... }: { config, lib, ... }:
# lib.mkIf false #< XXX(2024-11-17): disabled because it hasn't been working for months; web UI hangs on load, TVs see no files # lib.mkIf false #< XXX(2024-11-17): disabled because it hasn't been working for months; web UI hangs on load, TVs see no files
{ {
# https://jellyfin.org/docs/general/networking/index.html config = lib.mkIf (config.sane.maxBuildCost >= 2) {
sane.ports.ports."1900" = { # https://jellyfin.org/docs/general/networking/index.html
protocol = [ "udp" ]; sane.ports.ports."1900" = {
visibleTo.lan = true; protocol = [ "udp" ];
description = "colin-upnp-for-jellyfin"; visibleTo.lan = true;
}; description = "colin-upnp-for-jellyfin";
sane.ports.ports."7359" = { };
protocol = [ "udp" ]; sane.ports.ports."7359" = {
visibleTo.lan = true; protocol = [ "udp" ];
description = "colin-jellyfin-specific-client-discovery"; visibleTo.lan = true;
# ^ not sure if this is necessary: copied this port from nixos jellyfin.openFirewall description = "colin-jellyfin-specific-client-discovery";
}; # ^ not sure if this is necessary: copied this port from nixos jellyfin.openFirewall
# not sure if 8096/8920 get used either: };
sane.ports.ports."8096" = { # not sure if 8096/8920 get used either:
protocol = [ "tcp" ]; sane.ports.ports."8096" = {
visibleTo.lan = true; protocol = [ "tcp" ];
description = "colin-jellyfin-http-lan"; visibleTo.lan = true;
}; description = "colin-jellyfin-http-lan";
sane.ports.ports."8920" = { };
protocol = [ "tcp" ]; sane.ports.ports."8920" = {
visibleTo.lan = true; protocol = [ "tcp" ];
description = "colin-jellyfin-https-lan"; visibleTo.lan = true;
}; description = "colin-jellyfin-https-lan";
sane.persist.sys.byStore.plaintext = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/data"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/metadata"; method = "bind"; }
# TODO: ship plugins statically, via nix. that'll be less fragile
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/plugins/DLNA_5.0.0.0"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/root"; method = "bind"; }
];
sane.persist.sys.byStore.ephemeral = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/log"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/transcodes"; method = "bind"; }
];
services.jellyfin.enable = true;
users.users.jellyfin.extraGroups = [ "media" ];
sane.fs."/var/lib/jellyfin".dir.acl = {
user = "jellyfin";
group = "jellyfin";
mode = "0700";
};
# `"Jellyfin.Plugin.Dlna": "Debug"` logging: <https://jellyfin.org/docs/general/networking/dlna>
# TODO: switch Dlna back to 'Information' once satisfied with stability
sane.fs."/var/lib/jellyfin/config/logging.json".symlink.text = ''
{
"Serilog": {
"MinimumLevel": {
"Default": "Information",
"Override": {
"Microsoft": "Warning",
"System": "Warning",
"Jellyfin.Plugin.Dlna": "Debug"
}
},
"WriteTo": [
{
"Name": "Console",
"Args": {
"outputTemplate": "[{Timestamp:HH:mm:ss}] [{Level:u3}] [{ThreadId}] {SourceContext}: {Message:lj}{NewLine}{Exception}"
}
}
],
"Enrich": [ "FromLogContext", "WithThreadId" ]
}
}
'';
sane.fs."/var/lib/jellyfin/config/network.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<NetworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<BaseUrl />
<EnableHttps>false</EnableHttps>
<RequireHttps>false</RequireHttps>
<InternalHttpPort>8096</InternalHttpPort>
<InternalHttpsPort>8920</InternalHttpsPort>
<PublicHttpPort>8096</PublicHttpPort>
<PublicHttpsPort>8920</PublicHttpsPort>
<AutoDiscovery>true</AutoDiscovery>
<EnableUPnP>false</EnableUPnP>
<EnableIPv4>true</EnableIPv4>
<EnableIPv6>false</EnableIPv6>
<EnableRemoteAccess>true</EnableRemoteAccess>
<LocalNetworkSubnets>
<string>10.78.76.0/22</string>
</LocalNetworkSubnets>
<KnownProxies>
<string>127.0.0.1</string>
<string>localhost</string>
<string>10.78.79.1</string>
</KnownProxies>
<IgnoreVirtualInterfaces>false</IgnoreVirtualInterfaces>
<VirtualInterfaceNames />
<EnablePublishedServerUriByRequest>false</EnablePublishedServerUriByRequest>
<PublishedServerUriBySubnet />
<RemoteIPFilter />
<IsRemoteIPFilterBlacklist>false</IsRemoteIPFilterBlacklist>
</NetworkConfiguration>
'';
# guest user id is `5ad194d60dca41de84b332950ffc4308`
sane.fs."/var/lib/jellyfin/plugins/configurations/Jellyfin.Plugin.Dlna.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<DlnaPluginConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<EnablePlayTo>true</EnablePlayTo>
<ClientDiscoveryIntervalSeconds>60</ClientDiscoveryIntervalSeconds>
<BlastAliveMessages>true</BlastAliveMessages>
<AliveMessageIntervalSeconds>180</AliveMessageIntervalSeconds>
<SendOnlyMatchedHost>true</SendOnlyMatchedHost>
<DefaultUserId>5ad194d6-0dca-41de-84b3-32950ffc4308</DefaultUserId>
</DlnaPluginConfiguration>
'';
# fix LG TV to play more files.
# there are certain files for which it only supports Direct Play (not even "Direct Stream" -- but "Direct Play").
# this isn't a 100% fix: patching the profile allows e.g. Azumanga Daioh to play,
# but A Place Further Than the Universe still fails as before.
#
# profile is based on upstream: <https://github.com/jellyfin/jellyfin-plugin-dlna>
sane.fs."/var/lib/jellyfin/plugins/DLNA_5.0.0.0/profiles/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
# XXX(2024-11-17): old method, but the file referenced seems not to be used and setting just it causes failures:
# > [DBG] Jellyfin.Plugin.Dlna.ContentDirectory.ContentDirectoryService: Not eligible for DirectPlay due to unsupported subtitles
# sane.fs."/var/lib/jellyfin/plugins/configurations/dlna/user/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
systemd.services.jellyfin.unitConfig.RequiresMountsFor = [
"/var/media"
];
# Jellyfin multimedia server
# this is mostly taken from the official jellfin.org docs
services.nginx.virtualHosts."jelly.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8096";
proxyWebsockets = true;
recommendedProxySettings = true;
# extraConfig = ''
# # Disable buffering when the nginx proxy gets very resource heavy upon streaming
# proxy_buffering off;
# '';
}; };
};
sane.dns.zones."uninsane.org".inet.CNAME."jelly" = "native"; sane.persist.sys.byStore.plaintext = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/data"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/metadata"; method = "bind"; }
# TODO: ship plugins statically, via nix. that'll be less fragile
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/plugins/DLNA_5.0.0.0"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/root"; method = "bind"; }
];
sane.persist.sys.byStore.ephemeral = [
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/log"; method = "bind"; }
{ user = "jellyfin"; group = "jellyfin"; mode = "0700"; path = "/var/lib/jellyfin/transcodes"; method = "bind"; }
];
services.jellyfin.enable = true;
users.users.jellyfin.extraGroups = [ "media" ];
sane.fs."/var/lib/jellyfin".dir.acl = {
user = "jellyfin";
group = "jellyfin";
mode = "0700";
};
# `"Jellyfin.Plugin.Dlna": "Debug"` logging: <https://jellyfin.org/docs/general/networking/dlna>
# TODO: switch Dlna back to 'Information' once satisfied with stability
sane.fs."/var/lib/jellyfin/config/logging.json".symlink.text = ''
{
"Serilog": {
"MinimumLevel": {
"Default": "Information",
"Override": {
"Microsoft": "Warning",
"System": "Warning",
"Jellyfin.Plugin.Dlna": "Debug"
}
},
"WriteTo": [
{
"Name": "Console",
"Args": {
"outputTemplate": "[{Timestamp:HH:mm:ss}] [{Level:u3}] [{ThreadId}] {SourceContext}: {Message:lj}{NewLine}{Exception}"
}
}
],
"Enrich": [ "FromLogContext", "WithThreadId" ]
}
}
'';
sane.fs."/var/lib/jellyfin/config/network.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<NetworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<BaseUrl />
<EnableHttps>false</EnableHttps>
<RequireHttps>false</RequireHttps>
<InternalHttpPort>8096</InternalHttpPort>
<InternalHttpsPort>8920</InternalHttpsPort>
<PublicHttpPort>8096</PublicHttpPort>
<PublicHttpsPort>8920</PublicHttpsPort>
<AutoDiscovery>true</AutoDiscovery>
<EnableUPnP>false</EnableUPnP>
<EnableIPv4>true</EnableIPv4>
<EnableIPv6>false</EnableIPv6>
<EnableRemoteAccess>true</EnableRemoteAccess>
<LocalNetworkSubnets>
<string>10.78.76.0/22</string>
</LocalNetworkSubnets>
<KnownProxies>
<string>127.0.0.1</string>
<string>localhost</string>
<string>10.78.79.1</string>
</KnownProxies>
<IgnoreVirtualInterfaces>false</IgnoreVirtualInterfaces>
<VirtualInterfaceNames />
<EnablePublishedServerUriByRequest>false</EnablePublishedServerUriByRequest>
<PublishedServerUriBySubnet />
<RemoteIPFilter />
<IsRemoteIPFilterBlacklist>false</IsRemoteIPFilterBlacklist>
</NetworkConfiguration>
'';
# guest user id is `5ad194d60dca41de84b332950ffc4308`
sane.fs."/var/lib/jellyfin/plugins/configurations/Jellyfin.Plugin.Dlna.xml".file.text = ''
<?xml version="1.0" encoding="utf-8"?>
<DlnaPluginConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<EnablePlayTo>true</EnablePlayTo>
<ClientDiscoveryIntervalSeconds>60</ClientDiscoveryIntervalSeconds>
<BlastAliveMessages>true</BlastAliveMessages>
<AliveMessageIntervalSeconds>180</AliveMessageIntervalSeconds>
<SendOnlyMatchedHost>true</SendOnlyMatchedHost>
<DefaultUserId>5ad194d6-0dca-41de-84b3-32950ffc4308</DefaultUserId>
</DlnaPluginConfiguration>
'';
# fix LG TV to play more files.
# there are certain files for which it only supports Direct Play (not even "Direct Stream" -- but "Direct Play").
# this isn't a 100% fix: patching the profile allows e.g. Azumanga Daioh to play,
# but A Place Further Than the Universe still fails as before.
#
# profile is based on upstream: <https://github.com/jellyfin/jellyfin-plugin-dlna>
sane.fs."/var/lib/jellyfin/plugins/DLNA_5.0.0.0/profiles/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
# XXX(2024-11-17): old method, but the file referenced seems not to be used and setting just it causes failures:
# > [DBG] Jellyfin.Plugin.Dlna.ContentDirectory.ContentDirectoryService: Not eligible for DirectPlay due to unsupported subtitles
# sane.fs."/var/lib/jellyfin/plugins/configurations/dlna/user/LG Smart TV.xml".symlink.target = ./dlna/user/LG_Smart_TV.xml;
systemd.services.jellyfin.unitConfig.RequiresMountsFor = [
"/var/media"
];
# Jellyfin multimedia server
# this is mostly taken from the official jellfin.org docs
services.nginx.virtualHosts."jelly.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8096";
proxyWebsockets = true;
recommendedProxySettings = true;
# extraConfig = ''
# # Disable buffering when the nginx proxy gets very resource heavy upon streaming
# proxy_buffering off;
# '';
};
};
sane.dns.zones."uninsane.org".inet.CNAME."jelly" = "native";
};
} }

View File

@@ -1,40 +1,42 @@
{ pkgs, ... }: { config, lib, pkgs, ... }:
{ {
sane.services.kiwix-serve = { config = lib.mkIf (config.sane.maxBuildCost >= 3) {
enable = true; sane.services.kiwix-serve = {
port = 8013; enable = true;
zimPaths = with pkgs.zimPackages; [ port = 8013;
alpinelinux_en_all_maxi.zimPath zimPaths = with pkgs.zimPackages; [
archlinux_en_all_maxi.zimPath alpinelinux_en_all_maxi.zimPath
bitcoin_en_all_maxi.zimPath archlinux_en_all_maxi.zimPath
devdocs_en_nix.zimPath bitcoin_en_all_maxi.zimPath
gentoo_en_all_maxi.zimPath devdocs_en_nix.zimPath
# khanacademy_en_all.zimPath #< TODO: enable gentoo_en_all_maxi.zimPath
openstreetmap-wiki_en_all_maxi.zimPath # khanacademy_en_all.zimPath #< TODO: enable
psychonautwiki_en_all_maxi.zimPath openstreetmap-wiki_en_all_maxi.zimPath
rationalwiki_en_all_maxi.zimPath psychonautwiki_en_all_maxi.zimPath
# wikipedia_en_100.zimPath rationalwiki_en_all_maxi.zimPath
wikipedia_en_all_maxi.zimPath # wikipedia_en_100.zimPath
# wikipedia_en_all_mini.zimPath wikipedia_en_all_maxi.zimPath
zimgit-food-preparation_en.zimPath # wikipedia_en_all_mini.zimPath
zimgit-medicine_en.zimPath zimgit-food-preparation_en.zimPath
zimgit-post-disaster_en.zimPath zimgit-medicine_en.zimPath
zimgit-water_en.zimPath zimgit-post-disaster_en.zimPath
]; zimgit-water_en.zimPath
}; ];
services.nginx.virtualHosts."w.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8013";
recommendedProxySettings = true;
}; };
locations."= /robots.txt".extraConfig = ''
return 200 "User-agent: *\nDisallow: /\n";
'';
};
sane.dns.zones."uninsane.org".inet.CNAME."w" = "native"; services.nginx.virtualHosts."w.uninsane.org" = {
forceSSL = true;
enableACME = true;
# inherit kTLS;
locations."/" = {
proxyPass = "http://127.0.0.1:8013";
recommendedProxySettings = true;
};
locations."= /robots.txt".extraConfig = ''
return 200 "User-agent: *\nDisallow: /\n";
'';
};
sane.dns.zones."uninsane.org".inet.CNAME."w" = "native";
};
} }

View File

@@ -3,7 +3,7 @@
# - <repo:LemmyNet/lemmy:docker/nginx.conf> # - <repo:LemmyNet/lemmy:docker/nginx.conf>
# - <repo:LemmyNet/lemmy-ansible:templates/nginx.conf> # - <repo:LemmyNet/lemmy-ansible:templates/nginx.conf>
{ lib, pkgs, ... }: { config, lib, pkgs, ... }:
let let
uiPort = 1234; # default ui port is 1234 uiPort = 1234; # default ui port is 1234
backendPort = 8536; # default backend port is 8536 backendPort = 8536; # default backend port is 8536
@@ -24,154 +24,156 @@ let
media.video.max_frame_count = 30 * 60 * 60; media.video.max_frame_count = 30 * 60 * 60;
}; };
in { in {
services.lemmy = { config = lib.mkIf (config.sane.maxBuildCost >= 2) {
enable = true; services.lemmy = {
settings.hostname = "lemmy.uninsane.org"; enable = true;
# federation.debug forces outbound federation queries to be run synchronously settings.hostname = "lemmy.uninsane.org";
# N.B.: this option might not be read for 0.17.0+? <https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions> # federation.debug forces outbound federation queries to be run synchronously
# settings.federation.debug = true; # N.B.: this option might not be read for 0.17.0+? <https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions>
settings.port = backendPort; # settings.federation.debug = true;
ui.port = uiPort; settings.port = backendPort;
database.createLocally = true; ui.port = uiPort;
nginx.enable = true; database.createLocally = true;
}; nginx.enable = true;
};
systemd.services.lemmy.environment = { systemd.services.lemmy.environment = {
RUST_BACKTRACE = "full"; RUST_BACKTRACE = "full";
RUST_LOG = "error"; RUST_LOG = "error";
# RUST_LOG = "warn"; # RUST_LOG = "warn";
# RUST_LOG = "debug"; # RUST_LOG = "debug";
# RUST_LOG = "trace"; # RUST_LOG = "trace";
# upstream defaults LEMMY_DATABASE_URL = "postgres:///lemmy?host=/run/postgresql"; # upstream defaults LEMMY_DATABASE_URL = "postgres:///lemmy?host=/run/postgresql";
# - Postgres complains that we didn't specify a user # - Postgres complains that we didn't specify a user
# lemmy formats the url as: # lemmy formats the url as:
# - postgres://{user}:{password}@{host}:{port}/{database} # - postgres://{user}:{password}@{host}:{port}/{database}
# SO suggests (https://stackoverflow.com/questions/3582552/what-is-the-format-for-the-postgresql-connection-string-url): # SO suggests (https://stackoverflow.com/questions/3582552/what-is-the-format-for-the-postgresql-connection-string-url):
# - postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...] # - postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
# LEMMY_DATABASE_URL = "postgres://lemmy@/run/postgresql"; # connection to server on socket "/run/postgresql/.s.PGSQL.5432" failed: FATAL: database "run/postgresql" does not exist # LEMMY_DATABASE_URL = "postgres://lemmy@/run/postgresql"; # connection to server on socket "/run/postgresql/.s.PGSQL.5432" failed: FATAL: database "run/postgresql" does not exist
# LEMMY_DATABASE_URL = "postgres://lemmy?host=/run/postgresql"; # no PostgreSQL user name specified in startup packet # LEMMY_DATABASE_URL = "postgres://lemmy?host=/run/postgresql"; # no PostgreSQL user name specified in startup packet
# LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@?host=/run/postgresql"; # WORKS # LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@?host=/run/postgresql"; # WORKS
LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@/lemmy?host=/run/postgresql"; LEMMY_DATABASE_URL = lib.mkForce "postgres://lemmy@/lemmy?host=/run/postgresql";
}; };
users.groups.lemmy = {}; users.groups.lemmy = {};
users.users.lemmy = { users.users.lemmy = {
group = "lemmy"; group = "lemmy";
isSystemUser = true; isSystemUser = true;
}; };
services.nginx.virtualHosts."lemmy.uninsane.org" = { services.nginx.virtualHosts."lemmy.uninsane.org" = {
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
}; };
sane.dns.zones."uninsane.org".inet.CNAME."lemmy" = "native"; sane.dns.zones."uninsane.org".inet.CNAME."lemmy" = "native";
systemd.services.lemmy = { systemd.services.lemmy = {
# fix to use a normal user so we can configure perms correctly # fix to use a normal user so we can configure perms correctly
# XXX(2024-07-28): this hasn't been rigorously tested: # XXX(2024-07-28): this hasn't been rigorously tested:
# possible that i've set something too strict and won't notice right away # possible that i've set something too strict and won't notice right away
serviceConfig.DynamicUser = lib.mkForce false; serviceConfig.DynamicUser = lib.mkForce false;
serviceConfig.User = "lemmy"; serviceConfig.User = "lemmy";
serviceConfig.Group = "lemmy"; serviceConfig.Group = "lemmy";
# switch postgres from Requires -> Wants, so that postgres may restart without taking lemmy down with it. # switch postgres from Requires -> Wants, so that postgres may restart without taking lemmy down with it.
requires = lib.mkForce []; requires = lib.mkForce [];
wants = [ "postgresql.service" ]; wants = [ "postgresql.service" ];
# hardening (systemd-analyze security lemmy) # hardening (systemd-analyze security lemmy)
# a handful of these are specified in upstream nixpkgs, but mostly not # a handful of these are specified in upstream nixpkgs, but mostly not
serviceConfig.LockPersonality = true; serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true; serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true; serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = true; serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true; serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true; serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true; serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid"; serviceConfig.ProcSubset = "pid";
serviceConfig.ProtectClock = true; serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true; serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true; serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true; serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true; serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true; serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true; serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible"; serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict"; serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true; serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6"; serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.RestrictNamespaces = true; serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true; serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native"; serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" ]; serviceConfig.SystemCallFilter = [ "@system-service" ];
}; };
systemd.services.lemmy-ui = { systemd.services.lemmy-ui = {
# hardening (systemd-analyze security lemmy-ui) # hardening (systemd-analyze security lemmy-ui)
# TODO: upstream into nixpkgs # TODO: upstream into nixpkgs
serviceConfig.LockPersonality = true; serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true; serviceConfig.NoNewPrivileges = true;
# serviceConfig.MemoryDenyWriteExecute = true; #< it uses v8, JIT # serviceConfig.MemoryDenyWriteExecute = true; #< it uses v8, JIT
serviceConfig.PrivateDevices = true; serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true; serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true; serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true; serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid"; serviceConfig.ProcSubset = "pid";
serviceConfig.ProtectClock = true; serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true; serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true; serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true; serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true; serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true; serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true; serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible"; serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict"; serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true; serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6"; serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.RestrictNamespaces = true; serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true; serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native"; serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" "@pkey" "@sandbox" ]; serviceConfig.SystemCallFilter = [ "@system-service" "@pkey" "@sandbox" ];
}; };
#v DO NOT REMOVE: defaults to 0.3, instead of latest, so always need to explicitly set this. #v DO NOT REMOVE: defaults to 0.3, instead of latest, so always need to explicitly set this.
services.pict-rs.package = pict-rs; services.pict-rs.package = pict-rs;
systemd.services.pict-rs = { systemd.services.pict-rs = {
serviceConfig.ExecStart = lib.mkForce (lib.concatStringsSep " " [ serviceConfig.ExecStart = lib.mkForce (lib.concatStringsSep " " [
(lib.getExe pict-rs) (lib.getExe pict-rs)
"--config-file" "--config-file"
tomlConfig tomlConfig
"run" "run"
]); ]);
# hardening (systemd-analyze security pict-rs) # hardening (systemd-analyze security pict-rs)
# TODO: upstream into nixpkgs # TODO: upstream into nixpkgs
serviceConfig.LockPersonality = true; serviceConfig.LockPersonality = true;
serviceConfig.NoNewPrivileges = true; serviceConfig.NoNewPrivileges = true;
serviceConfig.MemoryDenyWriteExecute = true; serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.PrivateDevices = true; serviceConfig.PrivateDevices = true;
serviceConfig.PrivateMounts = true; serviceConfig.PrivateMounts = true;
serviceConfig.PrivateTmp = true; serviceConfig.PrivateTmp = true;
serviceConfig.PrivateUsers = true; serviceConfig.PrivateUsers = true;
serviceConfig.ProcSubset = "pid"; serviceConfig.ProcSubset = "pid";
serviceConfig.ProtectClock = true; serviceConfig.ProtectClock = true;
serviceConfig.ProtectControlGroups = true; serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true; serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true; serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true; serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true; serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true; serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectProc = "invisible"; serviceConfig.ProtectProc = "invisible";
serviceConfig.ProtectSystem = "strict"; serviceConfig.ProtectSystem = "strict";
serviceConfig.RemoveIPC = true; serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6"; serviceConfig.RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
serviceConfig.RestrictNamespaces = true; serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictSUIDSGID = true; serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native"; serviceConfig.SystemCallArchitectures = "native";
serviceConfig.SystemCallFilter = [ "@system-service" ]; serviceConfig.SystemCallFilter = [ "@system-service" ];
};
}; };
} }

View File

@@ -14,207 +14,209 @@ let
# logLevel = "debug"; # logLevel = "debug";
in in
{ {
sane.persist.sys.byStore.private = [ config = lib.mkIf (config.sane.maxBuildCost >= 2) {
# contains media i've uploaded to the server sane.persist.sys.byStore.private = [
{ user = "pleroma"; group = "pleroma"; path = "/var/lib/pleroma"; method = "bind"; } # contains media i've uploaded to the server
]; { user = "pleroma"; group = "pleroma"; path = "/var/lib/pleroma"; method = "bind"; }
services.pleroma.enable = true; ];
services.pleroma.secretConfigFile = config.sops.secrets.pleroma_secrets.path; services.pleroma.enable = true;
services.pleroma.configs = [ services.pleroma.secretConfigFile = config.sops.secrets.pleroma_secrets.path;
'' services.pleroma.configs = [
import Config ''
import Config
config :pleroma, Pleroma.Web.Endpoint, config :pleroma, Pleroma.Web.Endpoint,
url: [host: "fed.uninsane.org", scheme: "https", port: 443], url: [host: "fed.uninsane.org", scheme: "https", port: 443],
http: [ip: {127, 0, 0, 1}, port: 4040] http: [ip: {127, 0, 0, 1}, port: 4040]
# secret_key_base: "{secrets.pleroma.secret_key_base}", # secret_key_base: "{secrets.pleroma.secret_key_base}",
# signing_salt: "{secrets.pleroma.signing_salt}" # signing_salt: "{secrets.pleroma.signing_salt}"
config :pleroma, :instance, config :pleroma, :instance,
name: "Perfectly Sane", name: "Perfectly Sane",
description: "Single-user Pleroma instance", description: "Single-user Pleroma instance",
email: "admin.pleroma@uninsane.org", email: "admin.pleroma@uninsane.org",
notify_email: "notify.pleroma@uninsane.org", notify_email: "notify.pleroma@uninsane.org",
limit: 5000, limit: 5000,
registrations_open: true, registrations_open: true,
account_approval_required: true, account_approval_required: true,
max_pinned_statuses: 5, max_pinned_statuses: 5,
external_user_synchronization: true external_user_synchronization: true
# docs: https://hexdocs.pm/swoosh/Swoosh.Adapters.Sendmail.html # docs: https://hexdocs.pm/swoosh/Swoosh.Adapters.Sendmail.html
# test mail config with sudo -u pleroma ./bin/pleroma_ctl email test --to someone@somewhere.net # test mail config with sudo -u pleroma ./bin/pleroma_ctl email test --to someone@somewhere.net
config :pleroma, Pleroma.Emails.Mailer, config :pleroma, Pleroma.Emails.Mailer,
enabled: true, enabled: true,
adapter: Swoosh.Adapters.Sendmail, adapter: Swoosh.Adapters.Sendmail,
cmd_path: "${lib.getExe' pkgs.postfix "sendmail"}" cmd_path: "${lib.getExe' pkgs.postfix "sendmail"}"
config :pleroma, Pleroma.User, config :pleroma, Pleroma.User,
restricted_nicknames: [ "admin", "uninsane", "root" ] restricted_nicknames: [ "admin", "uninsane", "root" ]
config :pleroma, :media_proxy, config :pleroma, :media_proxy,
enabled: false, enabled: false,
redirect_on_failure: true redirect_on_failure: true
#base_url: "https://cache.pleroma.social" #base_url: "https://cache.pleroma.social"
# see for reference: # see for reference:
# - `force_custom_plan`: <https://docs.pleroma.social/backend/configuration/postgresql/#disable-generic-query-plans> # - `force_custom_plan`: <https://docs.pleroma.social/backend/configuration/postgresql/#disable-generic-query-plans>
config :pleroma, Pleroma.Repo, config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres, adapter: Ecto.Adapters.Postgres,
username: "pleroma", username: "pleroma",
database: "pleroma", database: "pleroma",
hostname: "localhost", hostname: "localhost",
pool_size: 10, pool_size: 10,
prepare: :named, prepare: :named,
parameters: [ parameters: [
plan_cache_mode: "force_custom_plan" plan_cache_mode: "force_custom_plan"
] ]
# XXX: prepare: :named is needed only for PG <= 12 # XXX: prepare: :named is needed only for PG <= 12
# prepare: :named, # prepare: :named,
# password: "{secrets.pleroma.db_password}", # password: "{secrets.pleroma.db_password}",
# Configure web push notifications # Configure web push notifications
config :web_push_encryption, :vapid_details, config :web_push_encryption, :vapid_details,
subject: "mailto:notify.pleroma@uninsane.org" subject: "mailto:notify.pleroma@uninsane.org"
# public_key: "{secrets.pleroma.vapid_public_key}", # public_key: "{secrets.pleroma.vapid_public_key}",
# private_key: "{secrets.pleroma.vapid_private_key}" # private_key: "{secrets.pleroma.vapid_private_key}"
# config :joken, default_signer: "{secrets.pleroma.joken_default_signer}" # config :joken, default_signer: "{secrets.pleroma.joken_default_signer}"
config :pleroma, :database, rum_enabled: false config :pleroma, :database, rum_enabled: false
config :pleroma, :instance, static_dir: "/var/lib/pleroma/instance/static" config :pleroma, :instance, static_dir: "/var/lib/pleroma/instance/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads" config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"
config :pleroma, configurable_from_database: false config :pleroma, configurable_from_database: false
# strip metadata from uploaded images # strip metadata from uploaded images
config :pleroma, Pleroma.Upload, filters: [Pleroma.Upload.Filter.Exiftool.StripLocation] config :pleroma, Pleroma.Upload, filters: [Pleroma.Upload.Filter.Exiftool.StripLocation]
# fix log spam: <https://git.pleroma.social/pleroma/pleroma/-/issues/1659> # fix log spam: <https://git.pleroma.social/pleroma/pleroma/-/issues/1659>
# specifically, remove LAN addresses from `reserved` # specifically, remove LAN addresses from `reserved`
config :pleroma, Pleroma.Web.Plugs.RemoteIp, config :pleroma, Pleroma.Web.Plugs.RemoteIp,
enabled: true, enabled: true,
reserved: ["127.0.0.0/8", "::1/128", "fc00::/7", "172.16.0.0/12"] reserved: ["127.0.0.0/8", "::1/128", "fc00::/7", "172.16.0.0/12"]
# TODO: GET /api/pleroma/captcha is broken # TODO: GET /api/pleroma/captcha is broken
# there was a nixpkgs PR to fix this around 2022/10 though. # there was a nixpkgs PR to fix this around 2022/10 though.
config :pleroma, Pleroma.Captcha, config :pleroma, Pleroma.Captcha,
enabled: false, enabled: false,
method: Pleroma.Captcha.Native method: Pleroma.Captcha.Native
# (enabled by colin) # (enabled by colin)
# Enable Strict-Transport-Security once SSL is working: # Enable Strict-Transport-Security once SSL is working:
config :pleroma, :http_security, config :pleroma, :http_security,
sts: true sts: true
# docs: https://docs.pleroma.social/backend/configuration/cheatsheet/#logger # docs: https://docs.pleroma.social/backend/configuration/cheatsheet/#logger
config :logger, config :logger,
backends: [{ExSyslogger, :ex_syslogger}] backends: [{ExSyslogger, :ex_syslogger}]
config :logger, :ex_syslogger, config :logger, :ex_syslogger,
level: :${logLevel} level: :${logLevel}
# policies => list of message rewriting facilities to be enabled # policies => list of message rewriting facilities to be enabled
# transparence => whether to publish these rules in node_info (and /about) # transparence => whether to publish these rules in node_info (and /about)
config :pleroma, :mrf, config :pleroma, :mrf,
policies: [Pleroma.Web.ActivityPub.MRF.SimplePolicy], policies: [Pleroma.Web.ActivityPub.MRF.SimplePolicy],
transparency: true transparency: true
# reject => { host, reason } # reject => { host, reason }
config :pleroma, :mrf_simple, config :pleroma, :mrf_simple,
reject: [ {"threads.net", "megacorp"}, {"*.threads.net", "megacorp"} ] reject: [ {"threads.net", "megacorp"}, {"*.threads.net", "megacorp"} ]
# reject: [ [host: "threads.net", reason: "megacorp"], [host: "*.threads.net", reason: "megacorp"] ] # reject: [ [host: "threads.net", reason: "megacorp"], [host: "*.threads.net", reason: "megacorp"] ]
# XXX colin: not sure if this actually _does_ anything # XXX colin: not sure if this actually _does_ anything
# better to steal emoji from other instances? # better to steal emoji from other instances?
# - <https://docs.pleroma.social/backend/configuration/cheatsheet/#mrf_steal_emoji> # - <https://docs.pleroma.social/backend/configuration/cheatsheet/#mrf_steal_emoji>
config :pleroma, :emoji, config :pleroma, :emoji,
shortcode_globs: ["/emoji/**/*.png"], shortcode_globs: ["/emoji/**/*.png"],
groups: [ groups: [
"Cirno": "/emoji/cirno/*.png", "Cirno": "/emoji/cirno/*.png",
"Kirby": "/emoji/kirby/*.png", "Kirby": "/emoji/kirby/*.png",
"Bun": "/emoji/bun/*.png", "Bun": "/emoji/bun/*.png",
"Yuru Camp": "/emoji/yuru_camp/*.png", "Yuru Camp": "/emoji/yuru_camp/*.png",
] ]
'' ''
]; ];
# extra binaries the pleroma service needs on its PATH at runtime:
systemd.services.pleroma.path = with pkgs; [
  # pleroma shells out to `sh` without an absolute path, so bash must be
  # resolvable or the service fails to start
  bash
  # invoked by the upload filter to strip geo tags from uploaded images
  exiftool
  # config.sane.programs.exiftool.package #< XXX(2024-10-20): breaks image uploading
  # shutdown logs once complained about a missing `awk`; probably not critical
  # config.sane.programs.gawk.package
  # provides `sendmail`, needed for email operations like password reset
  postfix
];
systemd.services.pleroma = {
  serviceConfig = {
    # postgres can be slow to service early requests, preventing pleroma
    # from starting on the first try: retry instead of failing the boot
    Restart = "on-failure";
    RestartSec = "10s";

    # hardening (systemd-analyze security pleroma)
    # XXX(2024-07-28): this hasn't been rigorously tested:
    # possible that something here is too strict and won't be noticed right away.
    # make sure to test:
    # - image/media uploading
    CapabilityBoundingSet = lib.mkForce [ "" "" ];  # nixos default is `~CAP_SYS_ADMIN`
    LockPersonality = true;
    MemoryDenyWriteExecute = true;
    NoNewPrivileges = true;
    PrivateDevices = lib.mkForce true;  # dunno why nixpkgs has this set false; it seems to work as true
    PrivateMounts = true;
    PrivateTmp = true;
    PrivateUsers = true;
    ProcSubset = "all";  # needs /proc/sys/kernel/overflowuid for bwrap
    ProtectClock = true;
    ProtectControlGroups = true;
    ProtectHome = true;
    ProtectKernelModules = true;
    ProtectProc = "invisible";
    ProtectSystem = lib.mkForce "strict";
    RemoveIPC = true;
    RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
    RestrictSUIDSGID = true;
    SystemCallArchitectures = "native";
    SystemCallFilter = [ "@system-service" "@mount" "@sandbox" ];  #< "@sandbox" might not actually be necessary

    # relaxations required by the bwrap/exiftool media-upload pipeline:
    ProtectHostname = false;  #< else bwrap can't mount /proc
    ProtectKernelLogs = false;  #< else breaks exiftool ("bwrap: Can't mount proc on /newroot/proc: Operation not permitted")
    ProtectKernelTunables = false;  #< else breaks exiftool
    RestrictNamespaces = false;  # media uploads require bwrap
  };
};
# this is required to allow pleroma to send email.
# raw `sendmail` works, but pleroma seems to pass it flags that require
# write access to the postfix queue (the "postdrop" group). hack to fix that.
users.users.pleroma.extraGroups = [ "postdrop" ];
# reverse proxy for the Pleroma server and web interface.
# TODO: enable publog?
services.nginx.virtualHosts."fed.uninsane.org" = {
  enableACME = true;
  forceSSL = true;  # pleroma redirects to https anyway
  # inherit kTLS;
  locations = {
    "/" = {
      proxyPass = "http://127.0.0.1:4040";
      recommendedProxySettings = true;
      # documented: https://git.pleroma.social/pleroma/pleroma/-/blob/develop/installation/pleroma.nginx
      extraConfig = ''
        # client_max_body_size defines the maximum upload size
        client_max_body_size 16m;
      '';
    };
  };
};
sane.dns.zones."uninsane.org".inet.CNAME."fed" = "native";
sops.secrets."pleroma_secrets" = {
owner = config.users.users.pleroma.name;
}; };
}; };
sane.dns.zones."uninsane.org".inet.CNAME."fed" = "native";
sops.secrets."pleroma_secrets" = {
owner = config.users.users.pleroma.name;
};
} }