Examples: Fat Nodes

Example LoadLeveler job command files for the SuperMUC fat nodes.

Parallel MPI Job (IBM MPI)

#!/bin/bash
# DO NOT USE environment = COPY_ALL
#@ job_type = parallel
#@ class = fat
#@ node = 4
#@ total_tasks=156
## other example
##@ tasks_per_node = 39
#@ wall_clock_limit = 1:20:30
##                    1 h 20 min 30 secs
#@ job_name = mytest
#@ network.MPI = sn_all,not_shared,us
#@ initialdir = $(home)/mydir
#@ output = job$(jobid).out
#@ error = job$(jobid).err
#@ notification=always
#@ notify_user=youremail_at_yoursite.xx
#@ queue
. /etc/profile
. /etc/profile.d/modules.sh
mpiexec -n 156 ./myprog.exe
# other example
# poe ./myprog.exe
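
To run any of these examples, pass the job command file to LoadLeveler. A minimal sketch, assuming the script above is saved as job.cmd:

# Submit the job command file
llsubmit job.cmd
# List your queued and running jobs
llq -u $USER
# Cancel a job, using the job ID reported by llsubmit/llq
llcancel <jobid>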

Parallel MPI Job (Intel MPI)

#!/bin/bash
# DO NOT USE environment = COPY_ALL
#@ job_type = MPICH
#@ class = fat
#@ node = 4
#@ total_tasks=156
## other example
##@ tasks_per_node = 39
#@ wall_clock_limit = 1:20:30
##                    1 h 20 min 30 secs
#@ job_name = mytest
#@ network.MPI = sn_all,not_shared,us
#@ initialdir = $(home)/mydir
#@ output = job$(jobid).out
#@ error = job$(jobid).err
#@ notification=always
#@ notify_user=youremail_at_yoursite.xx
#@ queue
. /etc/profile
. /etc/profile.d/modules.sh
# Set up the environment: switch from the default IBM MPI to Intel MPI
module unload mpi.ibm
module load mpi.intel
mpiexec -n 156 ./myprog.exe
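
Since this variant depends on the module switch above, it can be worth verifying inside the job script that Intel MPI is actually in use before launching. A small sketch using standard module/shell commands:

# Confirm the module switch took effect
module list 2>&1 | grep -i intel
which mpiexec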

Hybrid MPI/OpenMP Job (IBM MPI)

#!/bin/bash
# DO NOT USE environment = COPY_ALL
#@ job_type = parallel
#@ class = fat
#@ node = 4
#@ total_tasks=16
## other example
##@ tasks_per_node = 4
#@ wall_clock_limit = 1:20:30
##                    1 h 20 min 30 secs
#@ job_name = mytest
#@ network.MPI = sn_all,not_shared,us
#@ initialdir = $(home)/mydir
#@ output = job$(jobid).out
#@ error = job$(jobid).err
#@ notification=always
#@ notify_user=youremail_at_yoursite.xx
#@ queue
. /etc/profile
. /etc/profile.d/modules.sh
export MP_SINGLE_THREAD=no
export OMP_NUM_THREADS=10
# Pinning: bind each MPI task to a block of $OMP_NUM_THREADS cores
export MP_TASK_AFFINITY=core:$OMP_NUM_THREADS
mpiexec -n 16 ./myprog.exe
# other example
# poe ./myprog.exe
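
The hybrid geometry follows from nodes x tasks x threads: 4 nodes with 4 tasks per node give the 16 MPI tasks passed to mpiexec, and 4 tasks with 10 threads each fill the 40 cores of a fat node. A sanity-check sketch (not part of the job file; values copied from the example above):

NODES=4; TASKS_PER_NODE=4; THREADS=10
echo "MPI tasks:  $((NODES * TASKS_PER_NODE))"    # 16, matches mpiexec -n 16
echo "cores/node: $((TASKS_PER_NODE * THREADS))"  # 40, one full fat node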

Hybrid MPI/OpenMP Job (Intel MPI)

#!/bin/bash
# DO NOT USE environment = COPY_ALL
#@ job_type = MPICH
#@ class = fat
#@ node = 4
#@ total_tasks=16
## other example
##@ tasks_per_node = 4
#@ wall_clock_limit = 1:20:30
##                    1 h 20 min 30 secs
#@ job_name = mytest
#@ network.MPI = sn_all,not_shared,us
#@ initialdir = $(home)/mydir
#@ output = job$(jobid).out
#@ error = job$(jobid).err
#@ notification=always
#@ notify_user=youremail_at_yoursite.xx
#@ queue
. /etc/profile
. /etc/profile.d/modules.sh
# Set up the environment: switch from the default IBM MPI to Intel MPI
module unload mpi.ibm
module load mpi.intel
export OMP_NUM_THREADS=10
# optional: load the pinning setup for hybrid jobs
#module load mpi_pinning/hybrid_blocked
mpiexec -n 16 ./myprog.exe
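
To check how Intel MPI actually places the 16 tasks, its startup debug output can be enabled; at level 4 and above the runtime reports process pinning. A sketch using the standard Intel MPI environment variable (not LRZ-specific):

# Print Intel MPI's process pinning at startup
export I_MPI_DEBUG=4
mpiexec -n 16 ./myprog.exe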

Pure OpenMP Job (single node)

#!/bin/bash
# DO NOT USE environment = COPY_ALL
#@ wall_clock_limit = 01:20:00,01:19:30
## hard limit 1 h 20 min, soft limit 1 h 19 min 30 s
#@ job_name = mytest
#@ job_type = parallel
#@ class = fat
#@ node = 1
#@ total_tasks = 1
## OR
##@ tasks_per_node = 1
#@ node_usage = not_shared
#@ initialdir = $(home)/mydir
#@ output = job$(jobid).out
#@ error = job$(jobid).err
#@ notification=always
#@ notify_user=youremail_at_yoursite.xx
#@ queue
. /etc/profile
. /etc/profile.d/modules.sh
export OMP_NUM_THREADS=40
export KMP_AFFINITY="granularity=core,compact,1"
# start the OpenMP program (uses all 40 cores of one fat node)
./myprog.exe
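
To confirm the thread placement, the Intel OpenMP runtime can report its binding decisions through the verbose modifier of KMP_AFFINITY. A sketch based on the setting above:

# Same affinity as above, plus a report of where each thread is bound
export KMP_AFFINITY="verbose,granularity=core,compact,1"
./myprog.exe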