nixpkgs/pkgs/top-level/cuda-packages.nix
Connor Baker 8e800cedaf cudaPackages: move derivations to cuda-modules & support aarch64
cudaPackages.cuda_compat: ignore missing libs provided at runtime

cudaPackages.gpus: Jetson should never build by default

cudaPackages.flags: don't build Jetson capabilities by default

cudaPackages: re-introduce filter for pre-existing CUDA redist packages in overrides

cudaPackages: only recurseIntoAttrs for the latest of each major version

cudaPackages.nvccCompatibilities: use GCC 10 through CUDA 11.5 to avoid a GLIBC incompatibility

cudaPackages.cutensor: acquire libcublas through cudatoolkit prior to 11.4

cudaPackages.cuda_compat: mark as broken on aarch64-linux if not targeting Jetson

cudaPackages.cutensor_1_4: fix build

cudaPackages: adjust use of autoPatchelfIgnoreMissingDeps

cudaPackages.cuda_nvprof: remove unnecessary override to add addOpenGLRunpath

cudaPackages: use getExe' to avoid patchelf warning about missing meta.mainProgram

cudaPackages: fix evaluation with Nix 2.3

cudaPackages: fix platform detection for Jetson/non-Jetson aarch64-linux

python3Packages.tensorrt: mark as broken if required packages are missing

Note: evaluating the name of the derivation will fail if tensorrt is not present,
which is why we wrap the value in `lib.optionalString`.

cudaPackages.flags.getNixSystem: add guard based on jetsonTargets

cudaPackages.cudnn: use explicit path to patchelf

cudaPackages.tensorrt: use explicit path to patchelf
2023-12-07 16:45:54 +00:00

# Notes:
#
# Silvan (Tweag) covered recursive attribute sets in the Nix Hour:
# https://www.youtube.com/watch?v=BgnUFtd1Ivs
#
# I (@connorbaker) highly recommend watching it.
#
# Most helpful comment regarding recursive attribute sets:
#
# https://github.com/NixOS/nixpkgs/pull/256324#issuecomment-1749935979
#
# To summarize:
#
# - `prev` should only be used to access attributes which are going to be overridden.
# - `final` should only be used to access `callPackage` to build new packages.
# - Attribute names should be computable without relying on `final`.
# - Extensions should take arguments to compute attribute names before relying on `final`.
#
# Silvan's recommendation then is to explicitly use `callPackage` to provide everything our extensions need
# to compute the attribute names, without relying on `final`.
#
# I've (@connorbaker) attempted to do that, though I'm unsure of how this will interact with overrides.
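#
# As a rough illustration of that discipline (hypothetical attribute names), an
# extension written in this style looks like:
#
#   myExtension = final: prev: {
#     # `prev` only for attributes being overridden:
#     somePackage = prev.somePackage.overrideAttrs (old: {
#       doCheck = false;
#     });
#     # `final` only to reach `callPackage` for new packages:
#     newPackage = final.callPackage ./new-package.nix { };
#   };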
{
callPackage,
cudaVersion,
lib,
newScope,
pkgs,
}:
let
inherit (lib)
attrsets
customisation
fixedPoints
strings
versions
;
# Backbone
gpus = builtins.import ../development/cuda-modules/gpus.nix;
nvccCompatibilities = builtins.import ../development/cuda-modules/nvcc-compatibilities.nix;
flags = callPackage ../development/cuda-modules/flags.nix {inherit cudaVersion gpus;};
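# `flags` exposes, among other things, `isJetsonBuild`, which is used below to
# exclude packages that do not support Jetson devices.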
passthruFunction =
final:
(
{
inherit cudaVersion lib pkgs;
inherit gpus nvccCompatibilities flags;
cudaMajorVersion = versions.major cudaVersion;
cudaMajorMinorVersion = versions.majorMinor cudaVersion;
# Maintain a reference to the final cudaPackages.
# Without this, if we use `final.callPackage` and a package accepts `cudaPackages` as an argument,
# it's provided with `cudaPackages` from the top-level scope, which is not what we want. We want to
# provide the `cudaPackages` from the final scope -- that is, the *current* scope.
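# For example (hypothetical consumer), a package expression such as
#   { cudaPackages, ... }: cudaPackages.cudatoolkit
# built with `final.callPackage` should see *this* scope's `cudaPackages`,
# which the binding below guarantees.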
cudaPackages = final;
# TODO(@connorbaker): `cudaFlags` is an alias for `flags` which should be removed in the future.
cudaFlags = flags;
# Exposed as cudaPackages.backendStdenv.
# This is what nvcc uses as a backend,
# and it has to be an officially supported one (e.g. gcc11 for cuda11).
#
# However, it propagates the current stdenv's libstdc++ to avoid "GLIBCXX_* not found"
# errors when linking against other C++ libraries.
# E.g. for cudaPackages_11_8 we use gcc11 with gcc12's libstdc++
# Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context
backendStdenv = final.callPackage ../development/cuda-modules/backend-stdenv.nix {};
# Loose packages
cudatoolkit = final.callPackage ../development/cuda-modules/cudatoolkit {};
saxpy = final.callPackage ../development/cuda-modules/saxpy {};
}
# NCCL is not supported on Jetson, because Jetson does not use NVLink or PCI-e for inter-GPU communication.
# https://forums.developer.nvidia.com/t/can-jetson-orin-support-nccl/232845/9
// attrsets.optionalAttrs (!flags.isJetsonBuild) {
nccl = final.callPackage ../development/cuda-modules/nccl {};
nccl-tests = final.callPackage ../development/cuda-modules/nccl-tests {};
}
);
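# Compute a versioned attribute name from a package name and version, keeping
# only the major and minor components; e.g. (illustrative inputs)
# `mkVersionedPackageName "cudnn" "8.9.2.26"` evaluates to `"cudnn_8_9"`.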
mkVersionedPackageName =
name: version:
strings.concatStringsSep "_" [
name
(strings.replaceStrings ["."] ["_"] (versions.majorMinor version))
];
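# Extensions compose left to right: a later extension sees the attributes
# produced by earlier ones through its `prev` argument. Roughly (a sketch of
# lib.fixedPoints.composeExtensions, which composeManyExtensions folds over
# the list):
#   composeExtensions = f: g: final: prev:
#     let applied = f final prev;
#     in applied // g final (prev // applied);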
composedExtension = fixedPoints.composeManyExtensions [
(import ../development/cuda-modules/setup-hooks/extension.nix)
(callPackage ../development/cuda-modules/cuda/extension.nix {inherit cudaVersion;})
(callPackage ../development/cuda-modules/cuda/overrides.nix {inherit cudaVersion;})
(callPackage ../development/cuda-modules/generic-builders/multiplex.nix {
inherit cudaVersion flags mkVersionedPackageName;
pname = "cudnn";
releasesModule = ../development/cuda-modules/cudnn/releases.nix;
shimsFn = ../development/cuda-modules/cudnn/shims.nix;
fixupFn = ../development/cuda-modules/cudnn/fixup.nix;
})
(callPackage ../development/cuda-modules/cutensor/extension.nix {
inherit cudaVersion flags mkVersionedPackageName;
})
(callPackage ../development/cuda-modules/generic-builders/multiplex.nix {
inherit cudaVersion flags mkVersionedPackageName;
pname = "tensorrt";
releasesModule = ../development/cuda-modules/tensorrt/releases.nix;
shimsFn = ../development/cuda-modules/tensorrt/shims.nix;
fixupFn = ../development/cuda-modules/tensorrt/fixup.nix;
})
(callPackage ../test/cuda/cuda-samples/extension.nix {inherit cudaVersion;})
(callPackage ../test/cuda/cuda-library-samples/extension.nix {})
];
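# `makeScope` equips the resulting set with `callPackage`, `newScope`, and
# `overrideScope`, among others; `extends` layers the composed extension on
# top of `passthruFunction`, so both are resolved against the same `final`.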
cudaPackages = customisation.makeScope newScope (
fixedPoints.extends composedExtension passthruFunction
);
in
cudaPackages