This repository was archived by the owner on May 29, 2025. It is now read-only.

Commit f1f2ca1 (1 parent: e6d498c)
Author: John Major
Commit message: issues

1 file changed: +50 −0 lines changed
@@ -0,0 +1,50 @@
---
# Adopted from
# # https://github.com/cbrueffer/snakemake-aws-parallelcluster-slurm
cluster:
  mkdir -p logs/slurm/{rule}/ &&
  sbatch
    --parsable
    --cpus-per-task={threads}
    --time={resources.time}
    --job-name=D-{rule}-{sample}
    --output=logs/slurm/{rule}/{rule}.{sample}.{jobid}.out
    --error=logs/slurm/{rule}/{rule}.{sample}.{jobid}.err
    --partition={resources.partition}
    --chdir=$PWD
    --mem={resources.mem_mb}
    --comment $DAY_PROJECT
    --distribution={resources.distribution} {resources.exclusive}
default-resources:
  - disk_mb=1000
  - mem_mb=3000 # This should be total for the job, and the amount avail will be 0.95*published mem for instance type
  - threads=1
  - time=4440
  - partition=i8,i32,i64,i96,i128,i192
  - vcpu=1
  - distribution=block
  - exclusive=''
resources: vcpu=2704 # SET TO AWS QUOTA LIMIT minus HEADNODE and other running ec2 spot instances
jobs: 10
cores: 2704 # SET TO AWS QUOTA LIMIT minus HEADNODE and other running ec2 spot instances
latency-wait: 100
local-cores: 16
restart-times: 2
max-jobs-per-second: 10
keep-going: True
keep-remote: True
rerun-incomplete: True
printshellcmds: True
scheduler: greedy
use-conda: True
conda-frontend: conda
conda-prefix: "/fsx/resources/environments/conda/USER_REGSUB/HOSTNAME"
cluster-status: config/day_profiles/slurm/templates/status-scontrols.sh
cluster-cancel: scancel
max-status-checks-per-second: 10
# slurm: True ## DO NOT USE on AWS, try for local- untested. You'll want to comment out cluster-{status,cancel} and tweak the cluster and default-resources
force-use-threads: True
stats: "day_pipe_stats.json"
use-singularity: True
singularity-prefix: "/fsx/resources/environments/containers/USER_REGSUB/HOSTNAME"
singularity-args: " -B ./results:$PWD/results -B /tmp:/tmp -B /fsx:/fsx -B resources/fsx:/fsx -B /home/$USER:/home/$USER -B $PWD/:$PWD "
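
The profile points at a cluster-status script (config/day_profiles/slurm/templates/status-scontrols.sh) that this commit does not include. For reference, a minimal sketch of such a script under Snakemake's standard cluster-status contract is shown below: Snakemake calls it with the job id emitted by `sbatch --parsable`, and the script must print running, success, or failed. The state mapping and fallback behavior here are assumptions, not the repository's actual implementation.

#!/usr/bin/env bash
# Hypothetical sketch of a status-scontrols.sh-style script (NOT the file
# shipped in this repo). Snakemake passes the SLURM job id as $1 and expects
# exactly one of: running, success, failed on stdout.
jobid="$1"

# Ask SLURM for the job state. scontrol eventually forgets finished jobs,
# so an empty result is treated conservatively as failed in this sketch.
state=$(scontrol show job "$jobid" 2>/dev/null | grep -oP 'JobState=\K\S+')

case "$state" in
  COMPLETED) echo success ;;
  FAILED|CANCELLED*|TIMEOUT|NODE_FAIL|OUT_OF_MEMORY|PREEMPTED|BOOT_FAIL) echo failed ;;
  "") echo failed ;;
  *) echo running ;;
esac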
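A profile like this is normally selected at run time with Snakemake's --profile flag. The directory name below is only inferred from the cluster-status path above; the commit does not show where the profile actually lives.

# Hypothetical invocation; profile path inferred, not confirmed by this commit
snakemake --profile config/day_profiles/slurm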
