Skip to content
Snippets Groups Projects
Commit cd2381ac authored by Martin Lang's avatar Martin Lang
Browse files

Use ucx~vfs

With the vfs option enabled we observed random segmentation faults such
as the one described in
https://gitlab.gwdg.de/mpsd-cs/tickets/-/issues/38
Disabling the option seems to fix the problem.
parent 7423c5ca
No related branches found
No related tags found
2 merge requests!96Add fixes to 24a release,!95Use ucx~vfs
......@@ -3,7 +3,7 @@ spack:
- compilers:
- "##TOOLCHAIN_COMPILER##"
- mpis:
- openmpi@4.1.4+cuda cuda_arch=70 fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.13.1+cma+cuda+dc+dm+gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem
- openmpi@4.1.4+cuda cuda_arch=70 fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.13.1+cma+cuda+dc+dm+gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem
^cuda@11.4+allow-unsupported-compilers
- packages:
- binutils@2.38+headers+ld
......
......@@ -3,7 +3,7 @@ spack:
- compilers:
- "##TOOLCHAIN_COMPILER##"
- mpis:
- openmpi@4.1.4~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem
- openmpi@4.1.4~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem
- packages:
- binutils@2.38+headers+ld
- openblas@0.3.20
......
......@@ -3,7 +3,7 @@ spack:
- compilers:
- "##TOOLCHAIN_COMPILER##"
- mpis:
- openmpi@4.1.4~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem
- openmpi@4.1.4~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem
- packages:
- binutils@2.39+headers+ld
- openblas@0.3.21
......
......@@ -3,7 +3,7 @@ spack:
- compilers:
- "##TOOLCHAIN_COMPILER##"
- mpis:
- openmpi@4.1.5~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem
- openmpi@4.1.5~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.14.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem
- packages:
- binutils@2.40+headers+ld
- openblas@0.3.23
......
......@@ -3,7 +3,7 @@ spack:
- compilers:
- "##TOOLCHAIN_COMPILER##"
- mpis:
- openmpi@4.1.6~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.14.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem
- openmpi@4.1.6~cuda fabrics=ucx schedulers=slurm ^slurm@20-11-4-1+pmix^ucx@1.14.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem
- packages:
- binutils@2.40+headers+ld
- openblas@0.3.24
......
......@@ -4,7 +4,7 @@ octopus@13.0%gcc@11.3.0+mpi+netcdf+parmetis+arpack+cgal+pfft+python+likwid+libya
^pfft@1.0.8-alpha \
^nlopt@2.7.0 \
^openmpi@4.1.4~cuda fabrics=ucx schedulers=slurm \
^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem \
^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem \
^netlib-scalapack@2.1.0 \
^fftw@3.3.10+openmp \
^zlib \
......@@ -16,7 +16,7 @@ octopus@14.0%gcc@11.3.0+mpi+netcdf+parmetis+arpack+cgal+pfft+python+likwid+libya
^pfft@1.0.8-alpha \
^nlopt@2.7.0 \
^openmpi@4.1.4~cuda fabrics=ucx schedulers=slurm \
^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem \
^ucx@1.13.1+cma~cuda+dc+dm~gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem \
^netlib-scalapack@2.1.0 \
^fftw@3.3.10+openmp \
^zlib \
......
octopus@13.0%gcc@11.3.0+mpi+cuda+netcdf+parmetis+arpack+cgal+pfft+python+likwid+libyaml~elpa+nlopt cuda_arch=70 \
^openmpi@4.1.4+cuda cuda_arch=70 fabrics=ucx schedulers=slurm \
^ucx@1.13.1+cma+cuda+dc+dm+gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem \
^ucx@1.13.1+cma+cuda+dc+dm+gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem \
^cuda@11.4+allow-unsupported-compilers \
^fftw@3.3.10+openmp+pfft_patches \
^hdf5@1.12.2+fortran \
......@@ -11,7 +11,7 @@ octopus@13.0%gcc@11.3.0+mpi+cuda+netcdf+parmetis+arpack+cgal+pfft+python+likwid+
octopus@14.0%gcc@11.3.0+mpi+cuda+netcdf+parmetis+arpack+cgal+pfft+python+likwid+libyaml~elpa+nlopt cuda_arch=70 \
^openmpi@4.1.4+cuda cuda_arch=70 fabrics=ucx schedulers=slurm \
^ucx@1.13.1+cma+cuda+dc+dm+gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs+vfs~xpmem \
^ucx@1.13.1+cma+cuda+dc+dm+gdrcopy+ib_hw_tm+knem+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs~vfs~xpmem \
^cuda@11.4+allow-unsupported-compilers \
^fftw@3.3.10+openmp+pfft_patches \
^hdf5@1.12.2+fortran \
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment