Skip to content

Commit

Permalink
remove threads directive from each rule; every thread should get a single core
Browse files Browse the repository at this point in the history
  • Loading branch information
aryarm committed Sep 11, 2021
1 parent a9a87a2 commit a4ad1a1
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 18 deletions.
10 changes: 0 additions & 10 deletions Snakefile
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,6 @@ rule stitch:
files = directory(config['out']+"/{sample}/stitch"+("-lowQual" if check_config('low_qual_ortho', check_config('parallel')) else "")+"/stitched.files")
conda: "envs/default.yml"
benchmark: config['out']+"/{sample}/benchmark/stitch"+("-lowQual" if check_config('low_qual_ortho', check_config('parallel')) else "")+".tsv"
threads: config['cores']
shell:
"scripts/stitch.py {params} {input} {output.project}"

Expand All @@ -124,7 +123,6 @@ rule export_ortho:
str(Path(rules.stitch.output.project).parents[0])+"/ortho.tiff"
conda: "envs/default.yml"
benchmark: config['out']+"/{sample}/benchmark/export_ortho.tsv"
threads: config['cores']
shell:
"scripts/export_ortho.py {input} {output}"

Expand All @@ -139,7 +137,6 @@ rule segment:
low = config['out']+"/{sample}/segments/low/{image}.json" if check_config('parallel') else config['out']+"/{sample}/segments/low.json"
conda: "envs/default.yml"
benchmark: config['out']+"/{sample}/benchmark/segments/"+("{image}" if check_config('parallel') else "ortho")+".tsv"
threads: config['cores']
shell:
"scripts/segment.py {params} {input} {output}"

Expand All @@ -154,7 +151,6 @@ rule transform:
confidence="(high|low)"
conda: "envs/default.yml"
benchmark: config['out']+"/{sample}/benchmark/transform/{confidence}-{image}.json"
threads: config['cores']
shell:
"scripts/transform.py {input} {output}"

Expand Down Expand Up @@ -192,7 +188,6 @@ rule watershed:
segments = config['out']+"/{sample}/segments"+exp_str()+".json"
conda: "envs/default.yml"
benchmark: config['out']+"/{sample}/benchmark/watershed"+exp_str()+".tsv"
threads: config['cores']
shell:
"scripts/watershed.py {input.ortho} {params.high_dir} {params.low_dir} {output.segments}"

Expand Down Expand Up @@ -283,7 +278,6 @@ rule train:
config['out']+"/{sample}/train"+exp_str()+"/model.rda",
config['out']+"/{sample}/train"+exp_str()+"/variable_importance.tsv"
conda: "envs/classify.yml"
threads: config['cores']
shell:
"Rscript scripts/classify_train.R {input} {output}"

Expand Down Expand Up @@ -327,7 +321,6 @@ rule classify:
config['out']+"/{sample}/classify"+exp_str()+"/{image}.tsv"
conda: "envs/classify.yml"
benchmark: config['out']+"/{sample}/benchmark/classify"+exp_str()+"/{image}.tsv"
threads: config['cores']
shell:
"Rscript scripts/classify_test.R {input} {output}"

Expand All @@ -338,7 +331,6 @@ rule test:
config['out']+"/{sample}/test"+exp_str()+"/classify/{image}.tsv"
conda: "envs/classify.yml"
benchmark: config['out']+"/{sample}/benchmark/test"+exp_str()+"/classify/{image}.tsv"
threads: config['cores']
shell:
"Rscript scripts/classify_test.R {input} {output}"

Expand Down Expand Up @@ -434,7 +426,6 @@ rule segments_map:
config['out']+"/{sample}/segments-map"+exp_str()+".tiff"
conda: "envs/default.yml"
benchmark: config['out']+"/{sample}/benchmark/segments-map"+exp_str()+".tsv"
threads: config['cores']
shell:
"scripts/map.py -l {input.img} {input.labels} {output}"

Expand All @@ -448,7 +439,6 @@ rule map:
config['out']+"/{sample}/map"+exp_str()+".tiff"
conda: "envs/default.yml"
benchmark: config['out']+"/{sample}/benchmark/map"+exp_str()+".tsv"
threads: config['cores']
shell:
"scripts/map.py {input.img} {input.labels} {output} {input.predicts}"

Expand Down
8 changes: 0 additions & 8 deletions config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -70,14 +70,6 @@ model: data/models/test-all-exp.rda
# Defaults to 'out' if not provided
out: out

# specifying the number of cores used to process each dataset
# TODO: note that this wouldn't work yet - we need to have parallel environments defined
# in purves first before we can use snakemake's multithread feature
# I just have it defined here so in the future if we have parallel environment
# we could use it
cores: 5 # The number of cores to use for this step


# FOR THE RULE extract_images (only uncomment if using that rule)
# specifying the list of segment labels that you want to extract the
# source images for
Expand Down

0 comments on commit a4ad1a1

Please sign in to comment.