SBATCH: Hybrid MPI/Threaded job

Hybrid

  #!/bin/bash
  # Example SLURM script: hybrid MPI/OpenMP job running RAxML-NG.
  # 2 nodes x 4 MPI ranks/node = 8 ranks, each rank using 4 cores.
  #SBATCH --job-name=hybrid_job_test      # Job name
  #SBATCH --mail-type=END,FAIL            # Mail events (NONE, BEGIN, END, FAIL, ALL)
  #SBATCH --mail-user=email@ufl.edu       # Where to send mail
  #SBATCH --ntasks=8                      # Total number of MPI ranks
  #SBATCH --cpus-per-task=4               # Number of cores per MPI rank
  #SBATCH --nodes=2                       # Number of nodes
  #SBATCH --ntasks-per-node=4             # How many MPI ranks on each node
  #SBATCH --ntasks-per-socket=2           # How many MPI ranks per processor socket
  #SBATCH --mem-per-cpu=100mb             # Memory per core
  #SBATCH --time=00:05:00                 # Time limit hrs:min:sec
  #SBATCH --output=hybrid_test_%j.log     # Standard output and error log (%j = job ID)
  # Record working directory, execution host, and start time in the job log.
  pwd; hostname; date

  module load gcc/9.3.0 openmpi/4.1.1 raxml-ng/1.1.0

  # Quote $HPC_PMIX so a value containing spaces or glob characters cannot
  # be word-split into extra srun arguments (ShellCheck SC2086).
  srun --mpi="$HPC_PMIX" raxml-ng ...

  date


  #!/bin/bash
  # Example SLURM script: hybrid MPI/OpenMP LAMMPS run.
  # 4 nodes x 2 MPI ranks/node = 8 ranks, each rank using 8 OpenMP threads.
  #SBATCH --job-name=LAMMPS
  #SBATCH --output=LAMMPS_%j.out # Standard output and error log (%j = job ID)
  #SBATCH --mail-type=END,FAIL
  #SBATCH --mail-user=<email_address>
  #SBATCH --nodes=4              # Number of nodes
  #SBATCH --ntasks=8             # Total number of MPI ranks
  #SBATCH --ntasks-per-node=2    # Number of MPI ranks per node
  #SBATCH --ntasks-per-socket=1  # Number of tasks per processor socket on the node
  #SBATCH --cpus-per-task=8      # Number of OpenMP threads for each MPI process/rank
  #SBATCH --mem-per-cpu=2000mb   # Per processor memory request
  #SBATCH --time=4-00:00:00      # Walltime in hh:mm:ss or d-hh:mm:ss
  # Record start time, execution host, and working directory in the job log.
  date;hostname;pwd

  module load gcc/12.2.0 openmpi/4.1.5

  # Match the OpenMP thread count to the cores Slurm allocated per rank,
  # so threads are not oversubscribed on the allocated CPUs.
  export OMP_NUM_THREADS="$SLURM_CPUS_PER_TASK"

  # Quote $HPC_PMIX so a value containing spaces or glob characters cannot
  # be word-split into extra srun arguments (ShellCheck SC2086).
  # The LAMMPS input deck is fed to the binary on stdin.
  srun --mpi="$HPC_PMIX" /path/to/app/lmp_gator2 < in.Cu.v.24nm.eq_xrd

  date

UP

category: index