Warning: this is still beta. Please send feedback to damien.francois@uclouvain.be. Reload the page to reset.

1. Describe your job

Email address:
Job name:
Project:
Parallelization paradigm(s)
Job resources
Number of jobs in the array:
Duration: days, hours, minutes.
Number of processes:
Number of threads per process:
Memory per thread per process:
Process distribution:
Number of nodes:
Filesystem
Filesystem:
Total CPUs: | Total Memory: MB | Total CPU.Hours:

2. Choose a cluster

*

3. Copy-paste your script

#!/bin/bash
# Submission script for
# NOTE(review): wizard template — values after '=' are filled in by the
# generator, and the repeated, mutually-exclusive directives below
# (-q large vs -q main; the three 'select' forms; 'packed' vs 'scatter')
# are alternatives: keep exactly one of each when using this directly.
#PBS -N
#PBS -q large
#PBS -r y
#PBS -q main
#PBS -r y
#PBS -W group_list=
#PBS -J 1-
#
#PBS -l walltime=:::00
#PBS -l select=
#PBS -l select=:ncpus=:ompthreads=:mem=mb
#PBS -l select=:ncpus=:mpiprocs=:ompthreads=:mem=mb
#PBS -l place=packed
#PBS -l place=scatter
#PBS -l place=scatter
#
#PBS -M
#PBS -m abe

# Redirect all subsequent stdout to a per-job log file in the submit dir.
# Quoted: paths containing spaces would otherwise break the redirection.
exec > "${PBS_O_WORKDIR}/${PBS_JOBNAME}_${PBS_JOBID}.log"
echo "------------------ Work dir --------------------"
# Move to the submission directory; echo it only if the cd succeeded.
cd "${PBS_O_WORKDIR}" && echo "${PBS_O_WORKDIR}"
echo "------------------ Job Info --------------------"
echo "Task ID: $PBS_ARRAYID"

echo "jobid : $PBS_JOBID"
echo "jobname : $PBS_JOBNAME"
echo "job type : $PBS_ENVIRONMENT"
echo "submit dir : $PBS_O_WORKDIR"
echo "queue : $PBS_O_QUEUE"
echo "user : $PBS_O_LOGNAME"
echo "threads : $OMP_NUM_THREADS"

YOUR CODE HERE

# Append the job's full accounting record to the end of the log.
qstat -f "$PBS_JOBID"
#!/bin/bash
# Submission script for
# NOTE(review): wizard template — values after '=' are filled in by the
# generator before submission.
#SBATCH --job-name=
#SBATCH --array=1-
#SBATCH --time=-::00 # days-hh:mm:ss
#
#SBATCH --ntasks=
#SBATCH --ntasks-per-node=
#SBATCH --nodes=
#SBATCH --cpus-per-task=
#SBATCH --mem-per-cpu= # megabytes GB
#SBATCH --partition=
#
# Uncomment the following line if your work
# is floating point intensive and CPU-bound.
### SBATCH --threads-per-core=1
#
#SBATCH --mail-user=
#SBATCH --mail-type=ALL
#
#SBATCH --comment=

export OMP_NUM_THREADS=
export MKL_NUM_THREADS=
echo "Task ID: $SLURM_ARRAY_TASK_ID"
# Per-job scratch directories; quoted in case paths contain spaces.
mkdir -p "$GLOBALSCRATCH/$SLURM_JOB_ID"
mkdir -p "$LOCALSCRATCH/$SLURM_JOB_ID"
# Stage input files onto local scratch. The glob must stay OUTSIDE the
# quotes: a quoted "*" is passed literally to cp and matches no files.
cp -r "$SLURM_SUBMIT_DIR"/* "$LOCALSCRATCH/$SLURM_JOB_ID"

# Copy results back to the submit dir, and remove the scratch copy only
# if the copy-back succeeded (&& guards against data loss).
cp -r "$LOCALSCRATCH/$SLURM_JOB_ID"/* "$SLURM_SUBMIT_DIR/" && \
rm -rf "$LOCALSCRATCH/$SLURM_JOB_ID"


# Please note that nodes are allocated exclusively on the 'large' queue so the number of CPUs should ideally be a multiple of 24.

# Please note that the memory per CPU should preferably not exceed 2625 MB on the compute nodes.

© CÉCI.