ds003416-mriqc/code/process.condor_submit-merge

universe = vanilla
# resource requirements for each job
request_cpus = 1
request_memory = 3000M
request_disk = 100G
# be nice and only use free resources
# nice_user = true
# tell condor that a job is self-contained and the executable
# is enough to bootstrap the computation on the execute node
should_transfer_files = yes
# explicitly do not transfer anything back
# we are using datalad for everything that matters
transfer_output_files = ""
# the actual job script, nothing condor-specific in it
executable = $ENV(PWD)/code/results.merger
# the job expects these environment variables for labeling and synchronization
# - JOBID: job-specific ID to make a branch name from
#   (must be unique across all, even repeated, submissions);
#   including the cluster ID enables sorting repeated merge attempts
# - DSLOCKFILE: lock file (must be accessible from all compute jobs) used to
#   synchronize write access to the output dataset (see the sketch after the
#   environment block below)
# - DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__...:
#   additional locations from which datalad can obtain relevant subdatasets,
#   in case a configured URL is outdated (see the example at the end of this
#   comment block)
# - GIT_AUTHOR_...: identity information used to save dataset changes in
#   compute jobs
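# a hypothetical example of such a candidate (the name '000localstore' and
# the URL are placeholders, not part of this setup; datalad expands '{id}'
# to the ID of the subdataset being obtained):
#   DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__000localstore=ria+file:///data/project/store#{id}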
environment = "\
JOBID=merge_$(Cluster) \
DSLOCKFILE=$ENV(PWD)/.condor_datalad_lock \
GIT_AUTHOR_NAME='Felix Hoffstaedter' \
GIT_AUTHOR_EMAIL='f.hoffstaedter@fz-juelich.de' \
REPRONIM_USE_DUCT=1 \
PATH=~/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \
"
# place the job logs into PWD/logs, named by cluster ID so they can be
# matched to the corresponding result branch (JOBID)
log = $ENV(PWD)/logs/postpro_$(Cluster).log
output = $ENV(PWD)/logs/postpro_$(Cluster).out
error = $ENV(PWD)/logs/postpro_$(Cluster).err
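# job progress can be followed while it runs, e.g. with:
#   condor_q
#   tail -f logs/postpro_<ClusterId>.out
# where <ClusterId> is the cluster number reported by condor_submit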
# essential arguments for "results.merger"
# 1: RIA store URL (with dataset ID) to clone the analysis dataset from
arguments = "\
ria+file:///data/project/QC_workflow/TMP/RIA_QCworkflow#aae8905a-985f-46fb-91f5-35c772654ddd \
"
queue
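# submit from the dataset root, so that $ENV(PWD) resolves to it:
#   condor_submit code/process.condor_submit-merge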