====== Using SGE to run jobs at the Keck Center ======
All jobs must be submitted from ''...''.
* Check on progress of your job in the queue: ''qstat'' (a quick reference of common commands is given below)
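For day-to-day work the handful of generic SGE commands below covers most needs (the submit script name is only an example):

<code>
qsub myjob.sh        # submit a job script to the queue
qstat -u $USER       # list your own queued and running jobs
qstat -f             # show the full state of all queues
qdel <job_id>        # remove one of your jobs from the queue
</code>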
===== Keck Center cluster rules =====
All jobs must be submitted to the SGE queue. It is strictly prohibited to run any non-interactive CPU-consuming jobs outside of the queue.
The following limits are imposed on all jobs:
* max wall-clock time is 48 hrs (subject to change, use ''...'' to check the current value)
* max number of processors per user is 8, although this is dynamically changed based on the cluster load. To see the current limit: ''...'' (see also the commands sketched below)
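The following generic SGE commands may help when checking the currently configured limits (the queue name ''all.q'' is an assumption; substitute the queue you actually submit to):

<code>
qconf -sq all.q | grep h_rt     # per-queue wall-clock limit
qquota -u $USER                 # resource quota rules currently applied to your account
</code>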
Please note that old files (4 days and older) are regularly purged from ''/...''.
===== Setting up your account =====
chmod 640 authorized_keys
</code>
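For reference, a typical way to set up the password-less SSH login within the cluster looks like this (a sketch; adjust the key type and permissions to local policy):

<code>
# generate a key pair (accept the default location)
ssh-keygen -t rsa

# authorize the new public key for logins to the same account
cd ~/.ssh
cat id_rsa.pub >> authorized_keys
chmod 700 ~/.ssh
chmod 640 authorized_keys
</code>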
===== Running CPU-intensive jobs =====
qstat -f
===== Running parallel (MPI) jobs =====

If your application supports it, you can run up to 8 parallel processes per job. The workstations have 8 physical cores, so the maximum requestable number of processors is 8.

You have to use the ''mpi'' parallel environment and request the number of processors in your SGE submit script:

<code>
#$ -pe mpi 8
</code>

This requests 8 processors for your job. You also have to make a matching request in your application's input (see the Orca example below).
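Inside the job script SGE also sets the ''NSLOTS'' environment variable to the number of slots it actually granted; using it instead of a hard-coded count keeps the script consistent with the ''-pe'' request. A minimal sketch (the program name is only a placeholder):

<code>
# launch an MPI program on exactly the slots SGE granted to this job
mpirun -np $NSLOTS ./my_mpi_program
</code>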
==== Orca MPI ====

This is an example of an SGE submit script for running the MPI version of Orca on 8 processors.

<code>
#!/bin/bash
#$ -cwd
#$ -N orca_job
#$ -m beas
#$ -pe mpi 8
#$ -l h_rt=60:00:00
#
# create a scratch directory on the SSD and copy all runtime data there
export scratch_dir=`mktemp -d /...`
current_dir=`pwd`
cp * $scratch_dir
cd $scratch_dir

module load orca/3.0.3
module load openmpi/...
$ORCA_PATH/orca ...

# copy all data back from the scratch directory
cp * $current_dir
rm -rf $scratch_dir
</code>
You also have to put this in your Orca input file to tell the application to use 8 processors:

<code>
%pal nprocs 8 end
</code>
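Assuming the submit script above is saved as ''orca_job.sh'' (the file name is only an example), the job is submitted and monitored with the usual SGE commands:

<code>
qsub orca_job.sh     # submit to the queue
qstat -u $USER       # check its status
</code>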
Please note that you have to load the appropriate MPI library to use Orca. This is a compatibility table between the different Orca and MPI module versions:

^ Orca module ^ OpenMPI module ^
| orca/4.0.0 | openmpi/... |
| orca/3.0.3 | openmpi/... |
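If you are not sure which versions are installed, the module system can list them:

<code>
module avail orca       # list the installed Orca modules
module avail openmpi    # list the installed OpenMPI modules
</code>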
==== Amber MPI version ====
#$ -pe mpi 2
module load openmpi/2.0.1
module load amber/16
echo Running on host `hostname`
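# example only: a typical Amber MPI run line; the input/output file names are placeholders
# $NSLOTS is set by SGE to the number of slots granted by the -pe request above
mpirun -np $NSLOTS pmemd.MPI -O -i md.in -o md.out -p prmtop -c inpcrd -r restrt -x mdcrd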
# glide docking driver script
#
# rok 2014.9.10
set -xv
export SCHRODINGER_TEMP_PROJECT=$SCRATCH
export SCHRODINGER_JOBDB2=$SCRATCH
export SCHRODINGER_TMPDIR=$SCRATCH
export SCHRODINGER_JOBDIR=$SCRATCH
export SCHRODINGER_BATCHID="..."
export SCHRODINGER_MAX_RETRIES=0
export DONE=""

function finish() {
    echo "..."
    $SCHRODINGER/...
    $SCHRODINGER/...
    $SCHRODINGER/...
    $SCHRODINGER/...
    $SCHRODINGER/...
    $SCHRODINGER/...
    # copy your results back to a new directory in $HOME & cleanup
    outdir=$cwd.Results.$JOB_ID
    mkdir $outdir
    cp -a * $outdir
    export DONE=1
}
trap 'finish' ...

GLIDE_OPTS="..."
cat > dock.in <<EOF
$SCHRODINGER/...
</code>
./...
# clean the job, if still managed by Schrodinger job control
$SCHRODINGER/...
$SCHRODINGER/...
$SCHRODINGER/...
$SCHRODINGER/...
$SCHRODINGER/...
$SCHRODINGER/...

if [ -z "$DONE" ]; then
    # copy your results back to a new directory in $HOME & cleanup
    outdir=$cwd.Results.$JOB_ID
    mkdir $outdir
    cp -a * $outdir
fi
#rm -rf $SCRATCH
</code>
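If you need to check afterwards whether any sub-jobs are still registered with the Schrodinger job control database, the suite ships a job control utility; a typical check looks like this (consult the Schrodinger documentation for the options available in your release):

<code>
$SCHRODINGER/jobcontrol -list
</code>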
==== Amber ====
The optimal AMBER job configuration for Keck II is to use 1 CPU and 1 GPU per run.
<code>
#$ -l h_rt=12:00:00
module load cuda/7.5.18
module load amber/16
export CUDA_VISIBLE_DEVICES=0
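# example only: a typical single-GPU Amber run line; the file names are placeholders
pmemd.cuda -O -i md.in -o md.out -p prmtop -c inpcrd -r restrt -x mdcrd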
<code>
#!/bin/bash
set -xv
#$ -cwd
#$ -q gpu.q
<code>
#!/bin/bash
set -xv
#$ -cwd
#$ -q gpu.q
#$ -l h_rt=48:00:00
module load namd-cuda/2.11
export CUDA_VISIBLE_DEVICES=0
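# example only: a typical single-GPU NAMD run; the configuration file name is a placeholder
namd2 +p1 +devices 0 md.namd > md.log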
==== Benchmarks ====
These are several GPU benchmarks for CUDA-enabled Amber and NAMD which should help you estimate the Keck Center hardware performance.