Merge pull request #285522 from markuskowa/fix-closure-mpi

openmpi: split dev into separate output, reduce closure size
This commit is contained in:
markuskowa 2024-02-15 15:21:03 +01:00 committed by GitHub
commit 3630c552a3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 44 additions and 19 deletions

View File

@ -45,7 +45,7 @@ let
'';
in pkgs.runCommand "mpitest" {} ''
mkdir -p $out/bin
${pkgs.openmpi}/bin/mpicc ${mpitestC} -o $out/bin/mpitest
${lib.getDev pkgs.mpi}/bin/mpicc ${mpitestC} -o $out/bin/mpitest
'';
in {
name = "slurm";

View File

@ -168,7 +168,7 @@ stdenv.mkDerivation {
preConfigure = lib.optionalString useMpi ''
cat << EOF >> user-config.jam
using mpi : ${mpi}/bin/mpiCC ;
using mpi : ${lib.getDev mpi}/bin/mpiCC ;
EOF
''
# On darwin we need to add the `$out/lib` to the libraries' rpath explicitly,

View File

@ -59,7 +59,7 @@ in stdenv.mkDerivation rec {
"--disable-dap-remote-tests"
"--with-plugin-dir=${placeholder "out"}/lib/hdf5-plugins"
]
++ (lib.optionals mpiSupport [ "--enable-parallel-tests" "CC=${mpi}/bin/mpicc" ]);
++ (lib.optionals mpiSupport [ "--enable-parallel-tests" "CC=${lib.getDev mpi}/bin/mpicc" ]);
enableParallelBuilding = true;

View File

@ -1,6 +1,6 @@
{ lib, stdenv, fetchurl, gfortran, perl, libnl
{ lib, stdenv, fetchurl, removeReferencesTo, gfortran, perl, libnl
, rdma-core, zlib, numactl, libevent, hwloc, targetPackages, symlinkJoin
, libpsm2, libfabric, pmix, ucx, ucc
, libpsm2, libfabric, pmix, ucx, ucc, makeWrapper
, config
# Enable CUDA support
, cudaSupport ? config.cudaSupport, cudaPackages
@ -38,7 +38,7 @@ stdenv.mkDerivation rec {
find -name "Makefile.in" -exec sed -i "s/\`date\`/$ts/" \{} \;
'';
outputs = [ "out" "man" ];
outputs = [ "out" "man" "dev" ];
buildInputs = [ zlib ]
++ lib.optionals stdenv.isLinux [ libnl numactl pmix ucx ucc ]
@ -47,7 +47,7 @@ stdenv.mkDerivation rec {
++ lib.optional (stdenv.isLinux || stdenv.isFreeBSD) rdma-core
++ lib.optionals fabricSupport [ libpsm2 libfabric ];
nativeBuildInputs = [ perl ]
nativeBuildInputs = [ perl removeReferencesTo makeWrapper ]
++ lib.optionals cudaSupport [ cudaPackages.cuda_nvcc ]
++ lib.optionals fortranSupport [ gfortran ];
@ -71,24 +71,51 @@ stdenv.mkDerivation rec {
postInstall = ''
find $out/lib/ -name "*.la" -exec rm -f \{} \;
for f in mpi shmem osh; do
for i in f77 f90 CC c++ cxx cc fort; do
moveToOutput "bin/$f$i" "''${!outputDev}"
echo "move $fi$i"
moveToOutput "share/openmpi/$f$i-wrapper-data.txt" "''${!outputDev}"
done
done
for i in ortecc orte-info ompi_info oshmem_info opal_wrapper; do
moveToOutput "bin/$i" "''${!outputDev}"
done
moveToOutput "share/openmpi/ortecc-wrapper-data.txt" "''${!outputDev}"
'';
postFixup = ''
remove-references-to -t $dev $(readlink -f $out/lib/libopen-pal${stdenv.hostPlatform.extensions.sharedLibrary})
remove-references-to -t $man $(readlink -f $out/lib/libopen-pal${stdenv.hostPlatform.extensions.sharedLibrary})
# The path to the wrapper is hard coded in libopen-pal.so, which we just cleared.
wrapProgram $dev/bin/opal_wrapper \
--set OPAL_INCLUDEDIR $dev/include \
--set OPAL_PKGDATADIR $dev/share/openmpi
# default compilers should be identical to the
# compilers at build time
echo "$dev/share/openmpi/mpicc-wrapper-data.txt"
sed -i 's:compiler=.*:compiler=${targetPackages.stdenv.cc}/bin/${targetPackages.stdenv.cc.targetPrefix}cc:' \
$out/share/openmpi/mpicc-wrapper-data.txt
$dev/share/openmpi/mpicc-wrapper-data.txt
echo "$dev/share/openmpi/ortecc-wrapper-data.txt"
sed -i 's:compiler=.*:compiler=${targetPackages.stdenv.cc}/bin/${targetPackages.stdenv.cc.targetPrefix}cc:' \
$out/share/openmpi/ortecc-wrapper-data.txt
$dev/share/openmpi/ortecc-wrapper-data.txt
echo "$dev/share/openmpi/mpic++-wrapper-data.txt"
sed -i 's:compiler=.*:compiler=${targetPackages.stdenv.cc}/bin/${targetPackages.stdenv.cc.targetPrefix}c++:' \
$out/share/openmpi/mpic++-wrapper-data.txt
$dev/share/openmpi/mpic++-wrapper-data.txt
'' + lib.optionalString fortranSupport ''
echo "$dev/share/openmpi/mpifort-wrapper-data.txt"
sed -i 's:compiler=.*:compiler=${gfortran}/bin/${gfortran.targetPrefix}gfortran:' \
$out/share/openmpi/mpifort-wrapper-data.txt
$dev/share/openmpi/mpifort-wrapper-data.txt
'';
doCheck = true;

View File

@ -28,7 +28,7 @@ stdenv.mkDerivation rec {
export PATH=$PATH:${mpi}/bin
configureFlagsArray+=(
--with-mpi=${mpi}
--with-mpi=${lib.getDev mpi}
CC=mpicc
FC=mpif90
MPICC=mpicc

View File

@ -51,7 +51,7 @@ stdenv.mkDerivation rec {
-DBUILD_SHARED_LIBS=ON -DBUILD_STATIC_LIBS=OFF
-DLAPACK_LIBRARIES="-llapack"
-DBLAS_LIBRARIES="-lblas"
-DCMAKE_Fortran_COMPILER=${mpi}/bin/mpif90
-DCMAKE_Fortran_COMPILER=${lib.getDev mpi}/bin/mpif90
${lib.optionalString passthru.isILP64 ''
-DCMAKE_Fortran_FLAGS="-fdefault-integer-8"
-DCMAKE_C_FLAGS="-DInt=long"

View File

@ -29,8 +29,8 @@ let
text = ''
# Compiler
compiler = 'gcc'
mpicompiler = '${mpi}/bin/mpicc'
mpilinker = '${mpi}/bin/mpicc'
mpicompiler = '${lib.getDev mpi}/bin/mpicc'
mpilinker = '${lib.getDev mpi}/bin/mpicc'
# BLAS
libraries += ['blas']

View File

@ -47,7 +47,7 @@ in buildPythonPackage rec {
${lib.optionalString mpiSupport "export OMPI_MCA_rmaps_base_oversubscribe=yes"}
'';
preBuild = lib.optionalString mpiSupport "export CC=${mpi}/bin/mpicc";
preBuild = lib.optionalString mpiSupport "export CC=${lib.getDev mpi}/bin/mpicc";
nativeBuildInputs = [
cython

View File

@ -38,8 +38,6 @@ buildPythonPackage rec {
# work as expected
'';
setupPyBuildFlags = ["--mpicc=${mpi}/bin/mpicc"];
nativeBuildInputs = [ mpi ];
__darwinAllowLocalNetworking = true;

View File

@ -76,7 +76,7 @@ stdenv.mkDerivation rec {
++ lib.optional cppSupport "-DHDF5_BUILD_CPP_LIB=ON"
++ lib.optional fortranSupport "-DHDF5_BUILD_FORTRAN=ON"
++ lib.optional szipSupport "-DHDF5_ENABLE_SZIP_SUPPORT=ON"
++ lib.optionals mpiSupport [ "-DHDF5_ENABLE_PARALLEL=ON" "CC=${mpi}/bin/mpicc" ]
++ lib.optionals mpiSupport [ "-DHDF5_ENABLE_PARALLEL=ON" ]
++ lib.optional enableShared "-DBUILD_SHARED_LIBS=ON"
++ lib.optional javaSupport "-DHDF5_BUILD_JAVA=ON"
++ lib.optional usev110Api "-DDEFAULT_API_VERSION=v110"