diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b767fa3cca97d32726ebdc98172a4b4f0f694140..2a19d622500d8e5b48612ffe8bb2ace47ff469e9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: - id: check-merge-conflict - id: check-ast - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: 'v0.5.0' + rev: 'v0.5.1' hooks: - id: ruff args: [ "--fix" ] diff --git a/clowm/slurm/rest_client.py b/clowm/slurm/rest_client.py index c7602b6e2631c97c2de30dd70651df9f740f10e1..73e9cd86642bf1911051736201f9aba2c9067792 100644 --- a/clowm/slurm/rest_client.py +++ b/clowm/slurm/rest_client.py @@ -84,7 +84,9 @@ class SlurmClient: ID of the job to cancel. """ with tracer.start_as_current_span("slurm_cancel_job"): - await self._client.delete(self._base_url + f"/job/{job_id}", headers=self._headers) + await self._client.delete( + self._base_url + f"/job/{job_id}", params={"signal": "SIGINT"}, headers=self._headers + ) async def job_state(self, job_id: int) -> JobState: """ diff --git a/clowm/slurm/script_templates/nextflow_command.sh.tmpl b/clowm/slurm/script_templates/nextflow_command.sh.tmpl index bdfa8b07bcec11b26b309a8ca90dde862c194a82..9e6e20813ed8fb78534bdcc4fb8db1076c33de4f 100644 --- a/clowm/slurm/script_templates/nextflow_command.sh.tmpl +++ b/clowm/slurm/script_templates/nextflow_command.sh.tmpl @@ -13,6 +13,41 @@ fi cd $(dirname $NXF_WORK) +function cleanup() +{ + if [ ${1 if settings.cluster.execution_cleanup else 0} -eq 1 ] + then + rm -r $PWD + fi +} + +function uploadFiles() +{ + S3COMMANDFILE=$PWD/s3commands.txt + % if debug_s3_path: + echo "cp --include '*/.command*' $NXF_WORK/ ${debug_s3_path}/" >> $S3COMMANDFILE + echo "cp $PWD/.nextflow.log ${debug_s3_path}/nextflow-debug.log" >> $S3COMMANDFILE + % endif + % if logs_s3_path: + echo "cp $NEXTFLOW_LOG ${logs_s3_path}/nextflow.log" >> $S3COMMANDFILE + % endif + % if provenance_s3_path: + echo "cp --include 'nf-prov_*' $NXF_WORK/ ${provenance_s3_path}/" >> $S3COMMANDFILE + % endif + + if [[ -s $S3COMMANDFILE ]]; then + docker run --rm \ + -u "$(id -u):$(id -g)" \ + -e S3_ENDPOINT_URL="${settings.s3.uri}" \ + -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ + -v $PWD:$PWD \ + peakcom/s5cmd:v2.2.2 \ + run $S3COMMANDFILE + fi +} + + PWD=$(pwd) PRE_S3COMMANDFILE=$PWD/pre_s3commands.txt @@ -36,6 +71,8 @@ run $PRE_S3COMMANDFILE NEXTFLOW_LOG=$PWD/nextflow.log +trap 'uploadFiles;cleanup;exit 1' INT + ${settings.cluster.nxf_bin} run ${repo.url} \ % if scm_file_id is not None and repo.provider != 'github': -hub ${SCMProvider.generate_name(scm_file_id)} \ % endif @@ -60,28 +97,5 @@ ${settings.cluster.nxf_bin} run ${repo.url} \ -params-file params.json \ | tee $NEXTFLOW_LOG -S3COMMANDFILE=$PWD/s3commands.txt -% if debug_s3_path: -echo "cp --include '*/.command*' $NXF_WORK/ ${debug_s3_path}/" >> $S3COMMANDFILE -echo "cp $PWD/.nextflow.log ${debug_s3_path}/nextflow-debug.log" >> $S3COMMANDFILE -% endif -% if logs_s3_path: -echo "cp $NEXTFLOW_LOG ${logs_s3_path}/nextflow.log" >> $S3COMMANDFILE -% endif -% if provenance_s3_path: -echo "cp --include 'nf-prov_*' $NXF_WORK/ ${provenance_s3_path}/" >> $S3COMMANDFILE -% endif - -if [[ -s $S3COMMANDFILE ]]; then - docker run --rm \ --u "$(id -u):$(id -g)" \ --e S3_ENDPOINT_URL="${settings.s3.uri}" \ --e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ --e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ --v $PWD:$PWD \ -peakcom/s5cmd:v2.2.2 \ -run $S3COMMANDFILE -fi -% if settings.cluster.execution_cleanup: -rm -r $PWD -%endif +uploadFiles +cleanup