Testing the username filter

merlin6-test/cluster_info.json (new file)
@@ -0,0 +1,11 @@
{
    "name" : "merlin6-test",
    "worker_bin_path" : "/data/user/USERNAME/cryosparc/cryosparc2_worker/bin/cryosparcw",
    "cache_path" : "/scratch/",
    "send_cmd_tpl" : "{{ command }}",
    "qsub_cmd_tpl" : "sbatch {{ script_path_abs }}",
    "qstat_cmd_tpl" : "squeue -j {{ cluster_job_id }}",
    "qdel_cmd_tpl" : "scancel {{ cluster_job_id }}",
    "qinfo_cmd_tpl" : "sinfo",
    "transfer_cmd_tpl" : "cp {{ src_path }} {{ dest_path }}"
}
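
For reference, the {{ }} fields in these command templates are filled in by cryoSPARC when a job is submitted or queried. As an illustration (the job id is assumed, not taken from this commit), with cluster_job_id=123456 the qstat and qdel templates render to:

    squeue -j 123456
    scancel 123456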

merlin6-test/cluster_script.sh (new file)
@@ -0,0 +1,79 @@
#!/usr/bin/env bash
# cryoSPARC cluster submission script template for SLURM
# Lane: merlin6-test v1.2.1 (2020-03-27)
#
# If you edit this file, run 'cryosparcm cluster connect'

{# This template uses jinja2 syntax. #}
{%- macro _min(a, b) -%}
{%- if a <= b %}{{a}}{% else %}{{b}}{% endif -%}
{%- endmacro -%}
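{# _min(a, b) expands to the smaller of its two arguments; it is used below to cap the CPU and GPU requests. #}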

# Available variables:
# script_path_abs={{ script_path_abs }}
# - the absolute path to the generated submission script
# run_cmd={{ run_cmd }}
# - the complete command-line string to run the job
# num_cpu={{ num_cpu }}
# - the number of CPUs needed
# num_gpu={{ num_gpu }}
# - the number of GPUs needed. Note: the code will use this many GPUs
#   starting from dev id 0. The cluster scheduler or this script has the
#   responsibility of setting CUDA_VISIBLE_DEVICES so that the job code
#   ends up using the correct cluster-allocated GPUs.
# ram_gb={{ ram_gb }}
# - the amount of RAM needed in GB
# job_dir_abs={{ job_dir_abs }}
# - absolute path to the job directory
# project_dir_abs={{ project_dir_abs }}
# - absolute path to the project dir
# job_log_path_abs={{ job_log_path_abs }}
# - absolute path to the log file for the job
# worker_bin_path={{ worker_bin_path }}
# - absolute path to the cryosparc worker command
# run_args={{ run_args }}
# - arguments to be passed to cryosparcw run
# project_uid={{ project_uid }}
# - uid of the project
# job_uid={{ job_uid }}
# - uid of the job
# job_creator={{ job_creator }}
# - name of the user that created the job (may contain spaces)
# cryosparc_username={{ cryosparc_username }}
# - cryosparc username of the user that created the job (usually an email)

#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --output={{ job_log_path_abs }}.out
#SBATCH --error={{ job_log_path_abs }}.err
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --mem-per-cpu={{ ((ram_gb*1000)/num_cpu)|int }}M
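# Illustrative example (values assumed, not part of this commit): ram_gb=24 and
# num_cpu=4 render to --mem-per-cpu=6000M, i.e. the job's RAM divided evenly
# across its CPUs.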
#SBATCH --time=0-01:00:00

{%- if num_gpu == 0 %}
# Use CPU cluster
#SBATCH --partition=general
#SBATCH --account=merlin
#SBATCH --cpus-per-task={{ num_cpu }}
{%- else %}
# Use GPU cluster
#SBATCH --partition=gpu-test
#SBATCH --account=merlin-gpu
#SBATCH --gres=gpu:GTX1080:{{ _min(num_gpu, 4) }}
{%- if num_gpu <= 2 %}
#SBATCH --cpus-per-task={{ _min(num_cpu, 8) }}
{%- else %}
{# Slurm sometimes requests too many CPUs; restrict to 20 per machine #}
{%- set num_nodes = (num_gpu/4) | round(0, 'ceil') | int %}
#SBATCH --cpus-per-task={{ _min(num_cpu, 20*num_nodes) }}
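{# Illustrative example (values assumed): num_gpu=6 gives num_nodes=ceil(6/4)=2,
   so the CPU request is capped at 20*2=40. #}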
{%- endif %}
{%- endif %}

# Print hostname, for debugging
hostname

# Make sure this matches the version of CUDA used to compile cryoSPARC
module purge
module load cuda/10.0.130

srun {{ run_cmd }}
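
For reference, these two files are what 'cryosparcm cluster connect' reads when registering or updating the lane, as noted in the script header. A minimal sketch of that step (the checkout path is assumed, not taken from this commit):

    cd /path/to/merlin6-test     # hypothetical directory containing cluster_info.json and cluster_script.sh
    cryosparcm cluster connect   # registers/updates the "merlin6-test" lane from the files in the current directory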