tensorrt: init at 8.4.0.6

Add derivation for TensorRT 8, a high-performance deep learning inference SDK
from NVIDIA, which is at this point non-redistributable.  The current version
also requires CUDA 11, so this is left out of the cudaPackages_10* scopes.
This commit is contained in:
Aidan Gauland 2022-06-21 15:36:41 +12:00
parent a19f2c688b
commit d70b4df686
No known key found for this signature in database
GPG Key ID: 16E68DD2D0E77C91
4 changed files with 146 additions and 2 deletions

View File

@ -0,0 +1,79 @@
{ lib
, stdenv
, requireFile
, autoPatchelfHook
, autoAddOpenGLRunpathHook
, cudaVersion
, cudatoolkit
, cudnn
}:

# TensorRT 8.4 supports only CUDA 11.x with cuDNN >= 8.3; fail at evaluation
# time with a clear message rather than producing a broken build.
assert lib.assertMsg (lib.strings.versionAtLeast cudaVersion "11.0")
  "This version of TensorRT requires at least CUDA 11.0 (current version is ${cudaVersion})";
assert lib.assertMsg (lib.strings.versionAtLeast cudnn.version "8.3")
  "This version of TensorRT requires at least cuDNN 8.3 (current version is ${cudnn.version})";

stdenv.mkDerivation rec {
  pname = "cudatoolkit-${cudatoolkit.majorVersion}-tensorrt";
  version = "8.4.0.6";

  # NVIDIA does not permit redistribution of TensorRT, so the user must
  # download the tarball themselves and add it to the store by hand.
  src = requireFile rec {
    name = "TensorRT-${version}.Linux.x86_64-gnu.cuda-11.6.cudnn8.3.tar.gz";
    sha256 = "sha256-DNgHHXF/G4cK2nnOWImrPXAkOcNW6Wy+8j0LRpAH/LQ=";
    message = ''
      To use the TensorRT derivation, you must join the NVIDIA Developer Program
      and download the ${version} Linux x86_64 TAR package from
      ${meta.homepage}.
      Once you have downloaded the file, add it to the store with the following
      command, and try building this derivation again.
      $ nix-store --add-fixed sha256 ${name}
    '';
  };

  # Headers go to "dev"; libraries and trtexec go to "out".
  outputs = [ "out" "dev" ];

  nativeBuildInputs = [
    autoPatchelfHook
    autoAddOpenGLRunpathHook
  ];

  # Used by autoPatchelfHook
  buildInputs = [
    cudatoolkit.cc.cc.lib # libstdc++
    cudatoolkit
    cudnn
  ];

  sourceRoot = "TensorRT-${version}";

  installPhase = ''
    install --directory "$dev" "$out"
    mv include "$dev"
    mv targets/x86_64-linux-gnu/lib "$out"
    install -D --target-directory="$out/bin" targets/x86_64-linux-gnu/bin/trtexec
  '';

  # Tell autoPatchelf about runtime dependencies.
  # (postFixup phase is run before autoPatchelfHook.)
  postFixup =
    let
      # "8.4.0" from "8.4.0.6": the shared objects are versioned with only
      # the first three components.
      mostOfVersion = builtins.concatStringsSep "."
        (lib.take 3 (lib.versions.splitVersion version));
    in
    ''
      echo 'Patching RPATH of libnvinfer libs'
      patchelf --debug --add-needed libnvinfer.so \
        "$out/lib/libnvinfer.so.${mostOfVersion}" \
        "$out/lib/libnvinfer_plugin.so.${mostOfVersion}" \
        "$out/lib/libnvinfer_builder_resource.so.${mostOfVersion}"
    '';

  meta = with lib; {
    # Fix: TensorRT is NVIDIA's *inference* SDK ("interface" was a typo).
    description = "TensorRT: a high-performance deep learning inference library";
    homepage = "https://developer.nvidia.com/tensorrt";
    license = licenses.unfree;
    platforms = [ "x86_64-linux" ];
    maintainers = with maintainers; [ aidalgol ];
  };
}

View File

@ -0,0 +1,52 @@
{ lib
, python
, buildPythonPackage
, autoPatchelfHook
, unzip
, cudaPackages
}:

let
  # e.g. "310" for Python 3.10, as encoded in the wheel's filename tag.
  pyVersion = "${lib.versions.major python.version}${lib.versions.minor python.version}";
in
buildPythonPackage rec {
  pname = "tensorrt";

  # Version and source come from the main TensorRT derivation; the wheels
  # ship inside the same NVIDIA tarball.
  version = cudaPackages.tensorrt.version;
  src = cudaPackages.tensorrt.src;

  format = "wheel";

  # We unpack the wheel ourselves because of the odd packaging.
  dontUseWheelUnpack = true;

  nativeBuildInputs = [
    unzip
    autoPatchelfHook
    cudaPackages.autoAddOpenGLRunpathHook
  ];

  # Extract only the wheel matching the current Python from the tarball into
  # dist/, where the wheel installer expects to find it.
  preUnpack = ''
    mkdir -p dist
    tar --strip-components=2 -xf "$src" --directory=dist \
      "TensorRT-${version}/python/tensorrt-${version}-cp${pyVersion}-none-linux_x86_64.whl"
  '';

  sourceRoot = ".";

  buildInputs = [
    cudaPackages.cudnn
    cudaPackages.tensorrt
  ];

  # Fix: the attribute buildPythonPackage's import-check hook reads is
  # pythonImportsCheck; the misspelled pythonCheckImports was silently
  # ignored, so the check never ran.
  pythonImportsCheck = [
    "tensorrt"
  ];

  meta = with lib; {
    # Fix: "inference", not "interface" (typo).
    description = "Python bindings for TensorRT, a high-performance deep learning inference library";
    homepage = "https://developer.nvidia.com/tensorrt";
    license = licenses.unfree;
    platforms = [ "x86_64-linux" ];
    maintainers = with maintainers; [ aidalgol ];
  };
}

View File

@ -43,6 +43,16 @@ let
};
in { inherit cutensor; };
tensorrtExtension = final: prev: let
### Tensorrt
inherit (final) cudaMajorMinorVersion cudaMajorVersion;
# TODO: Add derivations for TensorRT versions that support older CUDA versions.
tensorrt = final.callPackage ../development/libraries/science/math/tensorrt/8.nix { };
in { inherit tensorrt; };
extraPackagesExtension = final: prev: {
nccl = final.callPackage ../development/libraries/science/math/nccl { };
@ -58,7 +68,7 @@ let
};
composedExtension = composeManyExtensions [
composedExtension = composeManyExtensions ([
extraPackagesExtension
(import ../development/compilers/cudatoolkit/extension.nix)
(import ../development/compilers/cudatoolkit/redist/extension.nix)
@ -67,6 +77,7 @@ let
(import ../test/cuda/cuda-samples/extension.nix)
(import ../test/cuda/cuda-library-samples/extension.nix)
cutensorExtension
];
] ++ (lib.optional (lib.strings.versionAtLeast cudaVersion "11.0") tensorrtExtension));
# We only package the current version of TensorRT, which requires CUDA 11.
in (scope.overrideScope' composedExtension)

View File

@ -10384,6 +10384,8 @@ in {
tensorly = callPackage ../development/python-modules/tensorly { };
tensorrt = callPackage ../development/python-modules/tensorrt { };
tellduslive = callPackage ../development/python-modules/tellduslive { };
termcolor = callPackage ../development/python-modules/termcolor { };