diff --git a/hosts/by-name/servo/services/trust-dns.nix b/hosts/by-name/servo/services/trust-dns.nix
index a65d0fe4..52a818e5 100644
--- a/hosts/by-name/servo/services/trust-dns.nix
+++ b/hosts/by-name/servo/services/trust-dns.nix
@@ -106,6 +106,27 @@ in
     hn = {
       substitutions = mkSubstitutions "hn";
       listenAddrsIpv4 = [ nativeAddrs."servo.hn" ];
+      enableRecursiveResolver = true; #< allow wireguard clients to use this as their DNS resolver
+      # extraConfig = {
+      #   zones = [
+      #     {
+      #       # forward the root zone to the local DNS resolver
+      #       # to allow wireguard clients to use this as their DNS resolver
+      #       zone = ".";
+      #       zone_type = "Forward";
+      #       stores = {
+      #         type = "forward";
+      #         name_servers = [
+      #           {
+      #             socket_addr = "127.0.0.53:53";
+      #             protocol = "udp";
+      #             trust_nx_responses = true;
+      #           }
+      #         ];
+      #       };
+      #     }
+      #   ];
+      # };
     };
     lan = {
       substitutions = mkSubstitutions "lan";
@@ -118,43 +139,6 @@ in
       #   nativeAddrs."servo.lan"
       # ];
       # };
-      # hn-resolver = {
-      #   # don't need %AWAN% here because we forward to the hn instance.
-      #   listenAddrsIpv4 = [ nativeAddrs."servo.hn" ];
-      #   extraConfig = {
-      #     zones = [
-      #       {
-      #         zone = "uninsane.org";
-      #         zone_type = "Forward";
-      #         stores = {
-      #           type = "forward";
-      #           name_servers = [
-      #             {
-      #               socket_addr = "${nativeAddrs."servo.hn"}:1053";
-      #               protocol = "udp";
-      #               trust_nx_responses = true;
-      #             }
-      #           ];
-      #         };
-      #       }
-      #       {
-      #         # forward the root zone to the local DNS resolver
-      #         zone = ".";
-      #         zone_type = "Forward";
-      #         stores = {
-      #           type = "forward";
-      #           name_servers = [
-      #             {
-      #               socket_addr = "127.0.0.53:53";
-      #               protocol = "udp";
-      #               trust_nx_responses = true;
-      #             }
-      #           ];
-      #         };
-      #       }
-      #     ];
-      #   };
-      # };
   };

   sane.services.dyn-dns.restartOnChange = [
@@ -162,6 +146,5 @@ in
     "trust-dns-hn.service"
     "trust-dns-lan.service"
     # "trust-dns-wan.service"
-    # "trust-dns-hn-resolver.service" # doesn't need restart because it doesn't know about WAN IP
   ];
 }
diff --git a/hosts/common/fs.nix b/hosts/common/fs.nix
index e27d0e0b..b18ccff6 100644
--- a/hosts/common/fs.nix
+++ b/hosts/common/fs.nix
@@ -26,10 +26,6 @@ let
   # lazyMount: defer mounting until first access from userspace.
   # see: `man systemd.automount`, `man automount`, `man autofs`
   lazyMount = noauto ++ automount;
-  wg = [
-    "x-systemd.requires=wireguard-wg-home.service"
-    "x-systemd.after=wireguard-wg-home.service"
-  ];
   fuse = [
     "allow_other"  # allow users other than the one who mounts it to access it.
                    # needed, if systemd is the one mounting this fs (as root)
@@ -136,9 +132,9 @@ let
       device = "ftp://servo-hn:/${subdir}";
       noCheck = true;
       fsType = "fuse.curlftpfs";
-      options = fsOpts.ftp ++ fsOpts.noauto ++ fsOpts.wg;
+      options = fsOpts.ftp ++ fsOpts.noauto;
       # fsType = "nfs";
-      # options = fsOpts.nfs ++ fsOpts.lazyMount ++ fsOpts.wg;
+      # options = fsOpts.nfs ++ fsOpts.lazyMount;
     };
     systemd.services."automount-servo-${utils.escapeSystemdPath subdir}" = let
       fs = config.fileSystems."/mnt/servo/${subdir}";
diff --git a/hosts/modules/derived-secrets/default.nix b/hosts/modules/derived-secrets/default.nix
index 561421ae..d429c172 100644
--- a/hosts/modules/derived-secrets/default.nix
+++ b/hosts/modules/derived-secrets/default.nix
@@ -25,6 +25,14 @@ let
         type = types.str;
         default = "0600";
       };
+      acl.user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+      };
+      acl.group = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+      };
     };
   };
 in
@@ -51,6 +59,9 @@ in
         (builtins.toString (c.len * 2))
       ];
       generated.acl.mode = c.acl.mode;
+      generated.acl.user = lib.mkIf (c.acl.user != null) c.acl.user;
+      generated.acl.group = lib.mkIf (c.acl.group != null) c.acl.group;
+      wantedBeforeBy = [ "local-fs-pre.target" ];
     }) cfg;
   };
 }
diff --git a/hosts/modules/wg-home.nix b/hosts/modules/wg-home.nix
index 75b7ca12..730e1c50 100644
--- a/hosts/modules/wg-home.nix
+++ b/hosts/modules/wg-home.nix
@@ -9,7 +9,7 @@ let
   server-cfg = config.sane.hosts.by-name."servo".wg-home;
   mkPeer = { ips, pubkey, endpoint }: {
     publicKey = pubkey;
-    allowedIPs = builtins.map (k: "${k}/32") ips;
+    allowedIPs = builtins.map (k: if builtins.match ".*/.*" k != null then k else "${k}/32") ips;
   } // (lib.optionalAttrs (endpoint != null) {
     inherit endpoint;
     # send keepalives every 25 seconds to keep NAT routes live.
@@ -29,7 +29,7 @@ let
   # make a single peer which routes all the given hosts
   mkServerPeer = hosts: mkPeer {
     inherit (server-cfg) pubkey endpoint;
-    ips = builtins.map (h: h.ip) hosts;
+    ips = (builtins.map (h: h.ip) hosts) ++ [ "0.0.0.0/0" ];
   };
 in
 {
@@ -69,14 +69,16 @@ in
     sane.derived-secrets."/run/wg-home.priv" = {
       len = 32;
       encoding = "base64";
+      acl.mode = "0640";
+      acl.group = "systemd-network";
     };

     # wireguard VPN which allows everything on my domain to speak to each other even when
     # not behind a shared LAN.
-    # this config defines both the endpoint (server) and client configs
+    # also allows clients to proxy WAN traffic through it.
+    # this config defines both the endpoint (server) and client configs.

-    # for convenience, have both the server and client use the same port for their wireguard connections.
- sane.ports.ports."51820" = { + sane.ports.ports."51820" = lib.mkIf (!cfg.routeThroughServo) { protocol = [ "udp" ]; visibleTo.lan = true; visibleTo.wan = cfg.visibleToWan; @@ -84,72 +86,51 @@ in description = "colin-wireguard"; }; - networking.wireguard.interfaces.wg-home = lib.mkMerge [ - { - listenPort = 51820; - privateKeyFile = "/run/wg-home.priv"; - # TODO: this make this `wants` and `after`, instead of manually starting it - preSetup = - let - gen-key = config.sane.fs."/run/wg-home.priv".unit; - in - "${pkgs.systemd}/bin/systemctl start '${gen-key}'"; + networking.wireguard.interfaces.wg-home = lib.mkIf (!cfg.routeThroughServo) ({ + listenPort = 51820; + privateKeyFile = "/run/wg-home.priv"; + # TODO: make this `wants` and `after`, instead of manually starting it + preSetup = + let + gen-key = config.sane.fs."/run/wg-home.priv".unit; + in + "${pkgs.systemd}/bin/systemctl start '${gen-key}'"; - ips = [ - "${cfg.ip}/24" - ]; + ips = [ + "${cfg.ip}/24" + ]; - peers = - let - all-peers = lib.mapAttrsToList (_: hostcfg: hostcfg.wg-home) config.sane.hosts.by-name; - peer-list = builtins.filter (p: p.ip != null && p.ip != cfg.ip && p.pubkey != null) all-peers; - in - if cfg.routeThroughServo then - # if acting as a client, then maintain a single peer -- the server -- which does the actual routing - [ (mkServerPeer peer-list) ] - else - # if acting as a server, route to each peer individually - mkClientPeers peer-list - ; - } - (lib.mkIf cfg.forwardToWan { - # documented here: - # TODO: don't hardcode eth0! - postSetup = '' - ${pkgs.iptables}/bin/iptables -t nat -A POSTROUTING -s ${cfg.ip}/24 -o eth0 -j MASQUERADE - ''; - postShutdown = '' - ${pkgs.iptables}/bin/iptables -t nat -D POSTROUTING -s ${cfg.ip}/24 -o eth0 -j MASQUERADE - ''; - }) - ]; + peers = + let + all-peers = lib.mapAttrsToList (_: hostcfg: hostcfg.wg-home) config.sane.hosts.by-name; + peer-list = builtins.filter (p: p.ip != null && p.ip != cfg.ip && p.pubkey != null) all-peers; + in + mkClientPeers peer-list + ; + } // (lib.optionalAttrs cfg.forwardToWan { + # documented here: + # TODO: don't hardcode ens1! + postSetup = '' + ${pkgs.iptables}/bin/iptables -t nat -A POSTROUTING -s ${cfg.ip}/24 -o ens1 -j MASQUERADE + ''; + postShutdown = '' + ${pkgs.iptables}/bin/iptables -t nat -D POSTROUTING -s ${cfg.ip}/24 -o ens1 -j MASQUERADE + ''; + })); - # also expose a wg-quick interface, so that one may `sane-vpn up servo` to route all traffic through servo - networking.wg-quick.interfaces.vpn-servo = { - address = [ cfg.ip ]; + # plug into my VPN abstractions so that one may: + # - `sane-vpn up wg-home` to route all traffic through servo + # - `sane-vpn do wg-home THING` to route select traffic through servo + sane.vpn.wg-home = lib.mkIf cfg.routeThroughServo { + id = 51; + endpoint = config.sane.hosts.by-name."servo".wg-home.endpoint; + publicKey = config.sane.hosts.by-name."servo".wg-home.pubkey; + addrV4 = cfg.ip; + subnetV4 = "24"; dns = [ config.sane.hosts.by-name."servo".wg-home.ip ]; privateKeyFile = "/run/wg-home.priv"; - - peers = [ - { - endpoint = config.sane.hosts.by-name."servo".wg-home.endpoint; - publicKey = config.sane.hosts.by-name."servo".wg-home.pubkey; - allowedIPs = [ - "0.0.0.0/0" - "::/0" - ]; - } - ]; - # to start: `systemctl start wg-quick-${name}` - autostart = false; - - # wg-home and vpn-servo interfaces interfere with the result that when connected to both, - # other wg-home users (lappy-hn, ...) aren't visible. 
disabling wg-home while the full - # vpn-servo is active allows wg-home users to be reachable again - preUp = "${pkgs.iproute2}/bin/ip link set wg-home down"; - postDown = "${pkgs.iproute2}/bin/ip link set wg-home up"; }; }; } diff --git a/modules/services/trust-dns/default.nix b/modules/services/trust-dns/default.nix index 36126999..55b62c39 100644 --- a/modules/services/trust-dns/default.nix +++ b/modules/services/trust-dns/default.nix @@ -87,14 +87,22 @@ let mkSystemdService = flavor: { includes, listenAddrsIpv4, listenAddrsIpv6, port, substitutions, extraConfig, ... }: let sed = "${pkgs.gnused}/bin/sed"; - configTemplate = toml.generate "trust-dns-${flavor}.toml" ( - ( - lib.filterAttrsRecursive (_: v: v != null) config.services.trust-dns.settings - ) // { - listen_addrs_ipv4 = listenAddrsIpv4; - listen_addrs_ipv6 = listenAddrsIpv6; - } // extraConfig - ); + baseConfig = ( + lib.filterAttrsRecursive (_: v: v != null) config.services.trust-dns.settings + ) // { + listen_addrs_ipv4 = listenAddrsIpv4; + listen_addrs_ipv6 = listenAddrsIpv6; + }; + configTemplate = toml.generate "trust-dns-${flavor}.toml" (baseConfig // + (lib.mapAttrs (k: v: + if k == "zones" then + # append to the baseConfig instead of overriding it + (baseConfig."${k}" or []) ++ v + else + v + ) + extraConfig + )); configPath = "/var/lib/trust-dns/${flavor}-config.toml"; sedArgs = builtins.map (key: ''-e "s/${key}/${substitutions."${key}"}/g"'') ( # HACK: %ANATIVE% often expands to one of the other subtitutions (e.g. %AWAN%) diff --git a/modules/vpn.nix b/modules/vpn.nix index 32ac6315..e40e095d 100644 --- a/modules/vpn.nix +++ b/modules/vpn.nix @@ -43,22 +43,27 @@ let }; fwmark = mkOption { type = types.int; + internal = true; }; # priority*: used externally, by e.g. `sane-vpn` priorityMain = mkOption { type = types.int; + internal = true; }; priorityWgTable = mkOption { type = types.int; + internal = true; }; priorityFwMark = mkOption { type = types.int; + internal = true; }; isDefault = mkOption { type = types.bool; description = '' read-only value: set based on whichever VPN has the lowest id. ''; + internal = true; }; endpoint = mkOption { type = types.str; @@ -80,6 +85,14 @@ let e.g. "172.27.12.34" ''; }; + subnetV4 = mkOption { + type = types.nullOr types.str; + description = '' + subnet dictating the range of IPs which should ALWAYS be routed through this VPN, no matter the system-wide settings. + ''; + example = "24"; + default = null; + }; dns = mkOption { type = types.listOf types.str; default = [ @@ -108,7 +121,7 @@ let priorityFwMark = config.id + 300; }; }); - mkVpnConfig = name: { id, dns, endpoint, publicKey, addrV4, privateKeyFile, priorityMain, priorityWgTable, priorityFwMark, fwmark, ... }: { + mkVpnConfig = name: { addrV4, dns, endpoint, fwmark, id, priorityMain, priorityWgTable, priorityFwMark, privateKeyFile, publicKey, subnetV4, ... }: { assertions = [ { assertion = (lib.count (c: c.id == id) (builtins.attrValues cfg)) == 1; @@ -151,10 +164,15 @@ let Scope = "link"; Destination = "0.0.0.0/0"; Source = addrV4; + }] ++ lib.optionals (subnetV4 != null) [{ + Scope = "link"; + Destination = "${addrV4}/${subnetV4}"; + Source = addrV4; }]; # RequiredForOnline => should `systemd-networkd-wait-online` fail if this network can't come up? linkConfig.RequiredForOnline = false; }; + systemd.network.config.networkConfig.ManageForeignRoutingPolicyRules = false; # linux will drop inbound packets if it thinks a reply to that packet wouldn't exit via the same interface (rpfilter). 
     # wg-quick has a solution via `iptables -j CONNMARK`, and that does work for system-wide VPNs,
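
Aside on the `mkPeer` change in hosts/modules/wg-home.nix: the new `allowedIPs` mapping leaves CIDR entries (such as the appended "0.0.0.0/0") untouched and only appends "/32" to bare host addresses. A minimal standalone sketch of that normalization, evaluable with `nix-instantiate --eval --strict`; the `ips` list here is made up for illustration:

let
  # hypothetical peer IPs: one bare host address, one CIDR range
  ips = [ "10.0.10.5" "0.0.0.0/0" ];
  # same expression as the patched mkPeer: keep anything containing "/",
  # otherwise treat the entry as a single host and append /32
  toAllowedIp = k: if builtins.match ".*/.*" k != null then k else "${k}/32";
in
  builtins.map toAllowedIp ips
  # evaluates to [ "10.0.10.5/32" "0.0.0.0/0" ]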
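
Similarly, the modules/services/trust-dns change merges any `zones` passed in via extraConfig into the base settings instead of replacing them wholesale. A rough standalone sketch of that merge under invented attribute values (`lib` here is nixpkgs' lib; the zone entries are placeholders):

let
  lib = (import <nixpkgs> {}).lib;
  baseConfig = { directory = "/var/lib/trust-dns"; zones = [ { zone = "uninsane.org"; } ]; };
  extraConfig = { zones = [ { zone = "."; } ]; };
  # zones from extraConfig get appended to baseConfig's zones; other keys override
  merged = baseConfig // (lib.mapAttrs (k: v:
    if k == "zones" then (baseConfig."${k}" or []) ++ v else v
  ) extraConfig);
in
  merged.zones
  # evaluates to [ { zone = "uninsane.org"; } { zone = "."; } ]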