{{{
sbatch
}}}

In the command above, substitute your own user ID, FASTA file, and the paths to both the FASTA file and the working directory. In this example, the job submitted to the SLURM scheduler might look like:

{{{
#!/bin/bash

#SBATCH --job-name=AFbatch
#SBATCH --nodes=1                      # ensure cores are on one node
#SBATCH --ntasks=1                     # run a single task
#SBATCH --cpus-per-task=8              # number of cores/threads requested.
#SBATCH --mem=64gb                     # memory requested.
#SBATCH --partition=nvidia-A6000-20    # partition (queue) to use
#SBATCH --output AFbatch1.5.5.out      # write output to file.
#SBATCH --gres=gpu:1                   # Required for GPU access

export PATH="/nfs/apps/test/colab155test/localcolabfold/colabfold-conda/bin:$PATH"

workpath=/lab/BaRC_projects/labs/Gehring_Lab/Carly_Martin_ProteinComplex_Feb_2024
cd ${workpath}

colabfold_batch --msa-mode mmseqs2_uniref_env --model-type alphafold2_multimer_v3 --rank multimer fasta/RALF23_FERONIA_complex.fa RALF23_FERONIA_CF_complex
}}}
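
The fasta/RALF23_FERONIA_complex.fa file passed to colabfold_batch holds the sequences of the complex. For a ColabFold multimer run, the chains of a complex go into a single FASTA entry with the individual sequences separated by a colon (:). A minimal sketch of such a file (the sequences below are placeholders, not the actual RALF23 or FERONIA sequences):

{{{
>RALF23_FERONIA_complex
ATRRYISYGALRRNTIPCSRRGAS:MKITEGLFLLALLFLVSSEVRS
}}}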

A similar job script that runs the full AlphaFold pipeline directly through its Singularity container (here with the monomer model, without ColabFold) might look like:

{{{
#!/bin/bash

#SBATCH --job-name=AF2                 # friendly name for job.
#SBATCH --nodes=1                      # ensure cores are on one node
#SBATCH --ntasks=1                     # run a single task
#SBATCH --cpus-per-task=8              # number of cores/threads requested.
#SBATCH --mem=64gb                     # memory requested.
#SBATCH --partition=nvidia-t4-20       # partition (queue) to use
#SBATCH --output output-%j.out         # %j inserts jobid to STDOUT
#SBATCH --gres=gpu:1                   # Required for GPU access

export TF_FORCE_UNIFIED_MEMORY=1
export XLA_PYTHON_CLIENT_MEM_FRACTION=4

export OUTPUT_NAME='model_1'
export ALPHAFOLD_DATA_PATH='/alphafold/data.2023b'   # Specify ALPHAFOLD_DATA_PATH

cd $AF2_WORK_DIR
singularity run -B $AF2_WORK_DIR:/af2 -B $ALPHAFOLD_DATA_PATH:/data -B .:/etc \
    --pwd /app/alphafold --nv /alphafold/alphafold_2.3.2.sif \
    --data_dir=/data/ \
    --output_dir=/af2/$FASTA_PATH \
    --fasta_paths=/af2/$FASTA_PATH/$FASTA_NAME \
    --max_template_date=2050-01-01 \
    --db_preset=full_dbs \
    --bfd_database_path=/data/bfd/bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt \
    --uniref30_database_path=/data/uniref30/UniRef30_2023_02 \
    --uniref90_database_path=/data/uniref90/uniref90.fasta \
    --mgnify_database_path=/data/mgnify/mgy_clusters_2022_05.fa \
    --template_mmcif_dir=/data/pdb_mmcif/mmcif_files \
    --obsolete_pdbs_path=/data/pdb_mmcif/obsolete.dat \
    --use_gpu_relax=True \
    --model_preset=monomer \
    --pdb70_database_path=/data/pdb70/pdb70

# Email the STDOUT output file to specified address.
/usr/bin/mail -s "$SLURM_JOB_NAME $SLURM_JOB_ID" $USERNAME@wi.mit.edu < $AF2_WORK_DIR/output-${SLURM_JOB_ID}.out
}}}
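
This second script references $AF2_WORK_DIR, $FASTA_PATH, $FASTA_NAME, and $USERNAME without defining them, so they must be set before the job runs. One way to supply them (a sketch with placeholder values and a hypothetical script name, not actual lab paths) is sbatch's --export option at submission time:

{{{
# Placeholder values -- substitute your own working directory, FASTA file, and user ID.
sbatch --export=ALL,AF2_WORK_DIR=/path/to/work_dir,FASTA_PATH=fasta,FASTA_NAME=my_protein.fa,USERNAME=your_id run_alphafold.sh
}}}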

The following instructions allow you to run AlphaFold-Multimer locally without using ColabFold:
