From 469aec905bab3be98838c7eb996ceffb2ea44404 Mon Sep 17 00:00:00 2001 From: zowoq <59103226+zowoq@users.noreply.github.com> Date: Thu, 18 Aug 2022 13:58:03 +1000 Subject: [PATCH] nixos/podman, podman: switch to `netavark` network stack --- .../from_md/release-notes/rl-2305.section.xml | 9 ++++ .../manual/release-notes/rl-2305.section.md | 2 + .../modules/virtualisation/podman/default.nix | 47 ++++++++++--------- .../modules/virtualisation/podman/dnsname.nix | 36 -------------- nixos/tests/all-tests.nix | 1 - nixos/tests/podman/default.nix | 25 ++++++++++ nixos/tests/podman/dnsname.nix | 42 ----------------- .../virtualization/podman/default.nix | 2 - .../virtualization/podman/wrapper.nix | 6 ++- 9 files changed, 65 insertions(+), 105 deletions(-) delete mode 100644 nixos/modules/virtualisation/podman/dnsname.nix delete mode 100644 nixos/tests/podman/dnsname.nix diff --git a/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml b/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml index 4fb5749e71c8..12e4d490300e 100644 --- a/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml +++ b/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml @@ -115,6 +115,15 @@ services.borgbackup.jobs.<name>.inhibitsSleep. + + + podman now uses the + netavark network stack. Users will need to + delete all of their local containers, images, volumes, etc, by + running podman system reset --force once + before upgrading their systems. 
+ + The EC2 image module no longer fetches instance metadata in diff --git a/nixos/doc/manual/release-notes/rl-2305.section.md b/nixos/doc/manual/release-notes/rl-2305.section.md index b5c9c4ceb55d..07ee346c2c87 100644 --- a/nixos/doc/manual/release-notes/rl-2305.section.md +++ b/nixos/doc/manual/release-notes/rl-2305.section.md @@ -40,6 +40,8 @@ In addition to numerous new and upgraded packages, this release has the followin - `borgbackup` module now has an option for inhibiting system sleep while backups are running, defaulting to off (not inhibiting sleep), available as [`services.borgbackup.jobs..inhibitsSleep`](#opt-services.borgbackup.jobs._name_.inhibitsSleep). +- `podman` now uses the `netavark` network stack. Users will need to delete all of their local containers, images, volumes, etc, by running `podman system reset --force` once before upgrading their systems. + - The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services. This breaks services which rely on metadata being present by the time stage-2 is entered. Anything which reads EC2 metadata from `/etc/ec2-metadata` should now have an `after` dependency on `fetch-ec2-metadata.service` diff --git a/nixos/modules/virtualisation/podman/default.nix b/nixos/modules/virtualisation/podman/default.nix index 13bbb4471ea5..6c00fabaa185 100644 --- a/nixos/modules/virtualisation/podman/default.nix +++ b/nixos/modules/virtualisation/podman/default.nix @@ -1,7 +1,6 @@ { config, lib, pkgs, ... 
}: let cfg = config.virtualisation.podman; - toml = pkgs.formats.toml { }; json = pkgs.formats.json { }; inherit (lib) mkOption types; @@ -27,24 +26,13 @@ let done ''; - net-conflist = pkgs.runCommand "87-podman-bridge.conflist" - { - nativeBuildInputs = [ pkgs.jq ]; - extraPlugins = builtins.toJSON cfg.defaultNetwork.extraPlugins; - jqScript = '' - . + { "plugins": (.plugins + $extraPlugins) } - ''; - } '' - jq <${cfg.package}/etc/cni/net.d/87-podman-bridge.conflist \ - --argjson extraPlugins "$extraPlugins" \ - "$jqScript" \ - >$out - ''; - in { imports = [ - ./dnsname.nix + (lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "dnsname" ] + "Use virtualisation.podman.defaultNetwork.settings.dns_enabled instead.") + (lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "extraPlugins" ] + "Netavark isn't compatible with CNI plugins.") ./network-socket.nix ]; @@ -149,11 +137,11 @@ in ''; }; - defaultNetwork.extraPlugins = lib.mkOption { - type = types.listOf json.type; - default = [ ]; + defaultNetwork.settings = lib.mkOption { + type = json.type; + default = { }; description = lib.mdDoc '' - Extra CNI plugin configurations to add to podman's default network. + Settings for podman's default network. 
''; }; @@ -164,11 +152,26 @@ in environment.systemPackages = [ cfg.package ] ++ lib.optional cfg.dockerCompat dockerCompat; - environment.etc."cni/net.d/87-podman-bridge.conflist".source = net-conflist; + # https://github.com/containers/podman/blob/097cc6eb6dd8e598c0e8676d21267b4edb11e144/docs/tutorials/basic_networking.md#default-network + environment.etc."containers/networks/podman.json" = lib.mkIf (cfg.defaultNetwork.settings != { }) { + source = json.generate "podman.json" ({ + dns_enabled = false; + driver = "bridge"; + id = "0000000000000000000000000000000000000000000000000000000000000000"; + internal = false; + ipam_options = { driver = "host-local"; }; + ipv6_enabled = false; + name = "podman"; + network_interface = "podman0"; + subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }]; + } // cfg.defaultNetwork.settings); + }; virtualisation.containers = { enable = true; # Enable common /etc/containers configuration - containersConf.settings = lib.optionalAttrs cfg.enableNvidia { + containersConf.settings = { + network.network_backend = "netavark"; + } // lib.optionalAttrs cfg.enableNvidia { engine = { conmon_env_vars = [ "PATH=${lib.makeBinPath [ pkgs.nvidia-podman ]}" ]; runtimes.nvidia = [ "${pkgs.nvidia-podman}/bin/nvidia-container-runtime" ]; diff --git a/nixos/modules/virtualisation/podman/dnsname.nix b/nixos/modules/virtualisation/podman/dnsname.nix deleted file mode 100644 index 3e7d35ae1e44..000000000000 --- a/nixos/modules/virtualisation/podman/dnsname.nix +++ /dev/null @@ -1,36 +0,0 @@ -{ config, lib, pkgs, ... }: -let - inherit (lib) - mkOption - mkIf - types - ; - - cfg = config.virtualisation.podman; - -in -{ - options = { - virtualisation.podman = { - - defaultNetwork.dnsname.enable = mkOption { - type = types.bool; - default = false; - description = lib.mdDoc '' - Enable DNS resolution in the default podman network. 
- ''; - }; - - }; - }; - - config = { - virtualisation.containers.containersConf.cniPlugins = mkIf cfg.defaultNetwork.dnsname.enable [ pkgs.dnsname-cni ]; - virtualisation.podman.defaultNetwork.extraPlugins = - lib.optional cfg.defaultNetwork.dnsname.enable { - type = "dnsname"; - domainName = "dns.podman"; - capabilities.aliases = true; - }; - }; -} diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix index 75f01d888b21..e9b58c7c4f77 100644 --- a/nixos/tests/all-tests.nix +++ b/nixos/tests/all-tests.nix @@ -527,7 +527,6 @@ in { plotinus = handleTest ./plotinus.nix {}; podgrab = handleTest ./podgrab.nix {}; podman = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/default.nix {}; - podman-dnsname = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/dnsname.nix {}; podman-tls-ghostunnel = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/tls-ghostunnel.nix {}; polaris = handleTest ./polaris.nix {}; pomerium = handleTestOn ["x86_64-linux"] ./pomerium.nix {}; diff --git a/nixos/tests/podman/default.nix b/nixos/tests/podman/default.nix index 106ba2057d06..c2ea399d65af 100644 --- a/nixos/tests/podman/default.nix +++ b/nixos/tests/podman/default.nix @@ -13,6 +13,13 @@ import ../make-test-python.nix ( isNormalUser = true; }; }; + dns = { pkgs, ... }: { + virtualisation.podman.enable = true; + + virtualisation.podman.defaultNetwork.settings.dns_enabled = true; + + networking.firewall.allowedUDPPorts = [ 53 ]; + }; docker = { pkgs, ... 
}: { virtualisation.podman.enable = true; @@ -43,6 +50,7 @@ import ../make-test-python.nix ( podman.wait_for_unit("sockets.target") + dns.wait_for_unit("sockets.target") docker.wait_for_unit("sockets.target") start_all() @@ -120,6 +128,23 @@ import ../make-test-python.nix ( pid = podman.succeed("podman run --rm --init busybox readlink /proc/self").strip() assert pid == "2" + with subtest("aardvark-dns"): + dns.succeed("tar cv --files-from /dev/null | podman import - scratchimg") + dns.succeed( + "podman run -d --name=webserver -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin -w ${pkgs.writeTextDir "index.html" "
<h1>Hi</h1>
"} scratchimg ${pkgs.python3}/bin/python -m http.server 8000" + ) + dns.succeed("podman ps | grep webserver") + dns.succeed(""" + for i in `seq 0 120`; do + podman run --rm --name=client -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg ${pkgs.curl}/bin/curl http://webserver:8000 >/dev/console \ + && exit 0 + sleep 0.5 + done + exit 1 + """) + dns.succeed("podman stop webserver") + dns.succeed("podman rm webserver") + with subtest("A podman member can use the docker cli"): docker.succeed(su_cmd("docker version")) diff --git a/nixos/tests/podman/dnsname.nix b/nixos/tests/podman/dnsname.nix deleted file mode 100644 index 3768ae79e067..000000000000 --- a/nixos/tests/podman/dnsname.nix +++ /dev/null @@ -1,42 +0,0 @@ -import ../make-test-python.nix ( - { pkgs, lib, ... }: - let - inherit (pkgs) writeTextDir python3 curl; - webroot = writeTextDir "index.html" "
<h1>Hi</h1>
"; - in - { - name = "podman-dnsname"; - meta = { - maintainers = with lib.maintainers; [ roberth ] ++ lib.teams.podman.members; - }; - - nodes = { - podman = { pkgs, ... }: { - virtualisation.podman.enable = true; - virtualisation.podman.defaultNetwork.dnsname.enable = true; - }; - }; - - testScript = '' - podman.wait_for_unit("sockets.target") - - with subtest("DNS works"): # also tests inter-container tcp routing - podman.succeed("tar cv --files-from /dev/null | podman import - scratchimg") - podman.succeed( - "podman run -d --name=webserver -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin -w ${webroot} scratchimg ${python3}/bin/python -m http.server 8000" - ) - podman.succeed("podman ps | grep webserver") - podman.succeed(""" - for i in `seq 0 120`; do - podman run --rm --name=client -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg ${curl}/bin/curl http://webserver:8000 >/dev/console \ - && exit 0 - sleep 0.5 - done - exit 1 - """) - podman.succeed("podman stop webserver") - podman.succeed("podman rm webserver") - - ''; - } -) diff --git a/pkgs/applications/virtualization/podman/default.nix b/pkgs/applications/virtualization/podman/default.nix index 3fed07237f75..f6cc83fde17c 100644 --- a/pkgs/applications/virtualization/podman/default.nix +++ b/pkgs/applications/virtualization/podman/default.nix @@ -68,7 +68,6 @@ buildGoModule rec { ${if stdenv.isDarwin then '' mv bin/{darwin/podman,podman} '' else '' - install -Dm644 cni/87-podman-bridge.conflist -t $out/etc/cni/net.d install -Dm644 contrib/tmpfile/podman.conf -t $out/lib/tmpfiles.d for s in contrib/systemd/**/*.in; do substituteInPlace "$s" --replace "@@PODMAN@@" "podman" # don't use unwrapped binary @@ -92,7 +91,6 @@ buildGoModule rec { # related modules inherit (nixosTests) podman-tls-ghostunnel - podman-dnsname ; oci-containers-podman = nixosTests.oci-containers.podman; }; diff --git a/pkgs/applications/virtualization/podman/wrapper.nix 
b/pkgs/applications/virtualization/podman/wrapper.nix index d0131eacdd37..7fe483a7079e 100644 --- a/pkgs/applications/virtualization/podman/wrapper.nix +++ b/pkgs/applications/virtualization/podman/wrapper.nix @@ -15,12 +15,12 @@ , iproute2 , catatonit , gvproxy +, aardvark-dns +, netavark }: # do not add qemu to this wrapper, store paths get written to the podman vm config and break when GCed -# adding aardvark-dns/netavark to `helpersBin` requires changes to the modules and tests - let binPath = lib.makeBinPath ([ ] ++ lib.optionals stdenv.isLinux [ @@ -41,7 +41,9 @@ let paths = [ gvproxy ] ++ lib.optionals stdenv.isLinux [ + aardvark-dns catatonit # added here for the pause image and also set in `containersConf` for `init_path` + netavark podman-unwrapped.rootlessport ]; };