slurm
Differences
This shows you the differences between two versions of the page.
| Both sides previous revisionPrevious revisionNext revision | Previous revision | ||
| slurm [2025/01/13 12:45] – [Where to begin] amcguire | slurm [2025/06/30 17:59] (current) – amcguire | ||
|---|---|---|---|
| Line 88: | Line 88: | ||
| === Default Quotas === | === Default Quotas === | ||
| By default we set a job to be run on one CPU and allocate 100MB of RAM. If you require more than that you should specify what you need. Using the following options will do: '' | By default we set a job to be run on one CPU and allocate 100MB of RAM. If you require more than that you should specify what you need. Using the following options will do: '' | ||
| + | |||
| + | === MPI Usage === | ||
| + | The AI cluster supports the use of MPI. The following example illustrates its basic use. | ||
| + | |||
| + | < | ||
| + | amcguire@fe01: | ||
| + | #include < | ||
| + | #include < | ||
| + | #include < | ||
| + | |||
| + | int main(int argc, char **argv) { | ||
| + | // Initialize MPI | ||
+ | MPI_Init(&argc, &argv); | ||
| + | |||
| + | // Get the number of processes in the global communicator | ||
| + | int count; | ||
+ | MPI_Comm_size(MPI_COMM_WORLD, &count); | ||
| + | |||
| + | // Get the rank of the current process | ||
| + | int rank; | ||
+ | MPI_Comm_rank(MPI_COMM_WORLD, &rank); | ||
| + | |||
| + | // Get the current hostname | ||
| + | char hostname[1024]; | ||
+ | gethostname(hostname, sizeof(hostname)); | ||
| + | |||
| + | // Print a hello world message for this rank | ||
+ | printf("Hello from process %d of %d on host %s\n", rank, count, hostname); | ||
| + | |||
| + | // Finalize the MPI environment before exiting | ||
| + | MPI_Finalize(); | ||
| + | } | ||
+ | amcguire@fe01:~$ cat mpi-hello.sh | ||
| + | #!/bin/bash | ||
| + | #SBATCH -J mpi-hello | ||
| + | #SBATCH -n 2 # Number of processes | ||
+ | #SBATCH -t 0:10:00 # Time limit (hh:mm:ss) | ||
| + | #SBATCH -o hello-job.out | ||
| + | |||
| + | # Disable the Infiniband transport for OpenMPI (not present on all clusters) | ||
| + | #export OMPI_MCA_btl=" | ||
| + | |||
| + | # Run the job (assumes the batch script is submitted from the same directory) | ||
| + | mpirun -np 2 ./mpi-hello | ||
| + | |||
+ | amcguire@fe01:~$ mpicc mpi-hello.c -o mpi-hello | ||
+ | amcguire@fe01:~$ ls -l mpi-hello | ||
+ | -rwxrwx--- 1 amcguire amcguire 16992 Jun 30 10:49 mpi-hello | ||
+ | amcguire@fe01:~$ sbatch mpi-hello.sh | ||
+ | Submitted batch job 1196702 | ||
+ | amcguire@fe01:~$ cat hello-job.out | ||
| + | Hello from process 0 of 2 on host p001 | ||
| + | Hello from process 1 of 2 on host p002 | ||
| + | </ | ||
| === Exclusive access to a node === | === Exclusive access to a node === | ||
/var/lib/dokuwiki/data/attic/slurm.1736793923.txt.gz · Last modified: 2025/01/13 12:45 by amcguire