diff --git a/mpi_info/.gitignore b/mpi_info/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..257a5d993896f61cd5dd2795965d42df3553ea5f
--- /dev/null
+++ b/mpi_info/.gitignore
@@ -0,0 +1,3 @@
+mpi_info
+output
+
diff --git a/mpi_info/Makefile b/mpi_info/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..9c7c85187213d70333c108427033666a07d99fc5
--- /dev/null
+++ b/mpi_info/Makefile
@@ -0,0 +1,2 @@
+mpi_info: mpi_info.c
+	mpicc $< -o $@
diff --git a/mpi_info/load_modules.sh b/mpi_info/load_modules.sh
new file mode 100755
index 0000000000000000000000000000000000000000..81909335de558a56da2a58a5fd9d86f03d458ce0
--- /dev/null
+++ b/mpi_info/load_modules.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+module purge
+module load spack git gcc/7.3.0 openmpi/3.1.4-pmi-cuda-ucx
+
diff --git a/mpi_info/mpi_info.c b/mpi_info/mpi_info.c
new file mode 100644
index 0000000000000000000000000000000000000000..1ec46deb705616595d894788826c4381a6e1e0cf
--- /dev/null
+++ b/mpi_info/mpi_info.c
@@ -0,0 +1,24 @@
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char *argv[]) {
+    char* local_rank_str = NULL;
+    int local_rank = 0;
+    local_rank_str = getenv("SLURM_LOCALID");
+    if (local_rank_str != NULL) {
+        local_rank = atoi(local_rank_str);
+        printf("slurm local rank = %d\n", local_rank);
+    } else {
+        printf("slurm local rank not defined\n");
+    }
+
+    int n_tasks, rank, length;
+    char host_name[MPI_MAX_PROCESSOR_NAME];
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &n_tasks);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Get_processor_name(host_name, &length);
+    printf("Number of tasks = %d, rank = %d, running on %s\n", n_tasks, rank, host_name);
+    MPI_Finalize();
+}
diff --git a/mpi_info/mpi_info.slurm b/mpi_info/mpi_info.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..419003475dc9700ac6d2828f2dc65b831edbb9d2
--- /dev/null
+++ b/mpi_info/mpi_info.slurm
@@ -0,0 +1,15 @@
+#!/bin/bash
+#SBATCH -J mpi_info
+#SBATCH -o output/mpi_info_%j.out
+#SBATCH -e output/mpi_info_%j.err
+#SBATCH --nodes=2
+#SBATCH --ntasks-per-node=1
+#SBATCH --gres=gpu:0
+#SBATCH --cpus-per-task=1
+#SBATCH --ntasks-per-core=1
+#SBATCH --threads-per-core=1
+#SBATCH --mem=1G
+#SBATCH --time 00:01:00
+
+source ./load_modules.sh
+srun ./mpi_info