RNA was extracted using the TRIzol method from the digestive gland of three healthy Sydney Rock Oysters (SRO, Saccostrea glomerata) and three SRO infected with QX disease (infection was confirmed by a PCR assay on DNA extracted from each tissue). The RNA was sequenced on an Illumina NovaSeq 6000, generating 150 bp paired-end reads (Macrogen, Korea).
The reads were downloaded to the Griffith University High Performance Computing cluster ‘Gowonda’ and the University of Queensland High Performance Computing cluster ‘Bunya’ (access provided by the Queensland Cyber Infrastructure Foundation (QCIF)) for bioinformatics processing and analyses, following the guidelines by Harvard Informatics (see link). These steps included error correction of the reads from each set (healthy SRO and QX-infected SRO) with Rcorrector v1.5.0 (Song and Florea 2015) and removal of “unfixable” reads.
# setup environment
CONDA_NAME="genomics"
mamba install -n $CONDA_NAME rcorrector agat # can be in the base environment
cd $HOME/etc/tools/Trinity # ~/aDNA/tools
# install additional utilities
git clone https://github.com/harvardinformatics/TranscriptomeAssemblyTools.git
WORKDIR=/scratch/aDNA/niki_qx
mkdir -p $WORKDIR
cd $WORKDIR
# Prepare a general array PBS script
echo '#!/bin/bash
#PBS -V
cd $WORKDIR
>&2 echo Current working directory: $PWD
source ~/.bashrc
conda activate $CONDA_NAME
set -Eeo pipefail'"
gawk -v ARRAY_IND=\$PBS_ARRAY_INDEX 'NR==ARRAY_IND' \$CMDS_FILE | bash" > ${WORKDIR}/array.pbspro
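# the generated array.pbspro runs a single line of $CMDS_FILE (selected by $PBS_ARRAY_INDEX) per
# array sub-job, so any list of one-command-per-line shell commands can be submitted as a PBS job array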
# download reads
rclone copy -P GU_owncloud:Shared/Oyster_QX_Disease/QX_RNA-Seq ./QX_RNA-Seq
cd QX_RNA-Seq/data && mkdir corrected_reads
# prepare commands to run Rcorrector for each sample in parallel
# ls -1 NN01-5*_1.fastq.gz | parallel -k --dry-run --rpl "{infile2} s:_1:_2:;" "run_rcorrector.pl -1 {} -2 {infile2} -verbose -t \$NCPUS -od corrected_reads" > rcorrector.cmds
# submit the jobs
# qsub -J1-$(cat rcorrector.cmds | wc -l) -l select=1:ncpus=8:mem=16GB,walltime=10:00:00 -N rcorr_batch -v "CMDS_FILE=rcorrector.cmds","WORKDIR=$PWD","CONDA_NAME=$CONDA_NAME" ${WORKDIR}/array.pbspro
# restart failed jobs at stage 3
# ls -1 NN01-5*_1.fastq.gz | parallel -k --dry-run --rpl "{infile2} s:_1:_2:;" "run_rcorrector.pl -1 {} -2 {infile2} -t \$NCPUS -od corrected_reads -stage 3" > rcorrector_s3.cmds
# submit the jobs
# qsub -J1-$(cat rcorrector_s3.cmds | wc -l) -l select=1:ncpus=8:mem=24GB,walltime=10:00:00 -N rcorr_batch_s3 -v "CMDS_FILE=rcorrector_s3.cmds","WORKDIR=$PWD","CONDA_NAME=$CONDA_NAME" ${WORKDIR}/array.pbspro
# didn't work for NN01-567 for some reason
# restart job for NN-567 from stage 1
# echo "source ~/.bashrc; conda activate $CONDA_NAME; cd $PWD; set -Eeo pipefail; run_rcorrector.pl -1 NN01-567_1.fastq.gz -2 NN01-567_2.fastq.gz -verbose -t \$NCPUS -od corrected_reads" | qsub -l select=1:ncpus=12:mem=48gb,walltime=20:00:00 -N rcorrect_567 -q bigmem
# Run Rcorrect on Control samples
mkdir -p corrected_reads
R1_FILES_CON=$(seq 1 3 | parallel -k echo "NN01-56{}_1.fastq.gz" | paste -sd,)
R2_FILES_CON=$(seq 1 3 | parallel -k echo "NN01-56{}_2.fastq.gz" | paste -sd,)
# submit job
echo "source ~/.bashrc; conda activate $CONDA_NAME; cd $PWD; set -Eeo pipefail; run_rcorrector.pl -1 $R1_FILES_CON -2 $R2_FILES_CON -t \$NCPUS -od corrected_reads" | qsub -l select=1:ncpus=12:mem=48gb,walltime=50:00:00 -N rcorr_sro_control -q bigmem
# combine and correct QX-infected samples
R1_FILES_QX=$(printf "67\n79\n80\n" | parallel -k echo "NN01-5{}_1.fastq.gz" | paste -sd,)
R2_FILES_QX=$(printf "67\n79\n80\n" | parallel -k echo "NN01-5{}_2.fastq.gz" | paste -sd,)
# submit job
echo "source ~/.bashrc; conda activate $CONDA_NAME; cd $PWD; set -Eeo pipefail; run_rcorrector.pl -1 $R1_FILES_QX -2 $R2_FILES_QX -t \$NCPUS -od corrected_reads" | qsub -l select=1:ncpus=12:mem=48gb,walltime=50:00:00 -N rcorr_sro_qx -q bigmem
# combine reads from individually-corrected samples
cd corrected_reads
echo "source ~/.bashrc; cd $PWD; set -Eeo pipefail; seq 1 3 | parallel -k -j4 cat NN01-56{}_1.cor.fq.gz > SRO_control_R1.cor.fastq.gz ; seq 1 3 | parallel -k -j4 cat NN01-56{}_2.cor.fq.gz > SRO_control_R2.cor.fastq.gz ; "'printf "67\n79\n80\n" | parallel -k -j4 cat NN01-5{}_1.cor.fq.gz > SRO_QX_R1.cor.fastq.gz ; printf "67\n79\n80\n" | parallel -k -j4 cat NN01-5{}_2.cor.fq.gz > SRO_QX_R2.cor.fastq.gz' | qsub -l select=1:ncpus=8:mem=24gb,walltime=3:00:00 -N combine_corr_reads
# remove unfixable reads
# SRO control
echo "source ~/.bashrc; cd $PWD; set -Eeo pipefail; python ~/etc/tools/Trinity/TranscriptomeAssemblyTools/utilities/FilterUncorrectabledPEfastq.py -1 SRO_control_R1.cor.fastq.gz -2 SRO_control_R2.cor.fastq.gz -s SRO_control " | qsub -l select=1:ncpus=8:mem=24gb,walltime=5:00:00 -N rm_unfixable_control
# SRO-QX
echo "source ~/.bashrc; cd $PWD; set -Eeo pipefail; python ~/etc/tools/Trinity/TranscriptomeAssemblyTools/utilities/FilterUncorrectabledPEfastq.py -1 SRO_QX_R1.cor.fastq.gz -2 SRO_QX_R2.cor.fastq.gz -s SRO_QX" | qsub -l select=1:ncpus=8:mem=24gb,walltime=5:00:00 -N rm_unfixable_qx
# compress files with genozip so we could upload them
ls -1 *.fastq.gz | parallel --dry-run genozip -@ \$NCPUS -f {} > genozip_files.cmds
# submit the jobs
qsub -J1-$(cat genozip_files.cmds | wc -l) -l select=1:ncpus=8:mem=16GB,walltime=4:00:00 -N genozip_fastq -v "CMDS_FILE=genozip_files.cmds","WORKDIR=$PWD","CONDA_NAME=base" ${WORKDIR}/array.pbspro
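# the .genozip archives can later be restored with genounzip (part of the same genozip package);
# a minimal sketch, assuming genozip's default output naming (*.genozip written next to the originals):
# ls -1 *.genozip | parallel --dry-run genounzip -@ \$NCPUS {} > genounzip_files.cmds
# qsub -J1-$(cat genounzip_files.cmds | wc -l) -l select=1:ncpus=8:mem=16GB,walltime=4:00:00 -N genounzip_fastq -v "CMDS_FILE=genounzip_files.cmds","WORKDIR=$PWD","CONDA_NAME=base" ${WORKDIR}/array.pbspro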
# upload files to Research Space
rclone copy -P $WORKDIR/QX_RNA-Seq/data GU_owncloud:Shared/Oyster_QX_Disease/QX_RNA-Seq/data
WORKDIR=/scratch/project/adna/
mkdir -p $WORKDIR/QX_RNA-Seq/data/combined_reads
cd $WORKDIR/QX_RNA-Seq/data
# prepare slurm script
echo '#!/bin/bash --login
#SBATCH --job-name=QX-combine-reads
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=%x.%j.log
#SBATCH --cpus-per-task=8
#SBATCH --mem=16G
#SBATCH --time=4:00:00
#SBATCH --account=a_agri_genomics
#SBATCH --partition=general
source ~/.bashrc
set -Eeo pipefail
cd $SLURM_SUBMIT_DIR
srun seq 1 3 | parallel -k cat NN01-56{}_1.fastq.gz > combined_reads/SRO_control_R1.fastq.gz; \
seq 1 3 | parallel -k cat NN01-56{}_2.fastq.gz > combined_reads/SRO_control_R2.fastq.gz; \
printf "67\n79\n80\n" | parallel -k cat NN01-5{}_1.fastq.gz > combined_reads/SRO_QX_R1.fastq.gz; \
printf "67\n79\n80\n" | parallel -k cat NN01-5{}_2.fastq.gz > combined_reads/SRO_QX_R2.fastq.gz' > combine_reads.slurm
# submit the job
sbatch combine_reads.slurm
echo '#!/bin/bash --login
#SBATCH --job-name=QX-combine-rcorrect
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=%x.%j.log
#SBATCH --cpus-per-task=8
#SBATCH --mem=16G
#SBATCH --time=4:00:00
#SBATCH --account=a_agri_genomics
#SBATCH --partition=general
source ~/.bashrc
set -Eeo pipefail
cd $SLURM_SUBMIT_DIR
srun seq 1 3 | parallel -k cat NN01-56{}_1.cor.fq.gz > SRO_control_R1.cor.fastq.gz; \
seq 1 3 | parallel -k cat NN01-56{}_2.cor.fq.gz > SRO_control_R2.cor.fastq.gz; \
printf "67\n79\n80\n" | parallel -k cat NN01-5{}_1.cor.fq.gz > SRO_QX_R1.cor.fastq.gz; \
printf "67\n79\n80\n" | parallel -k cat NN01-5{}_2.cor.fq.gz > SRO_QX_R2.cor.fastq.gz' > combine_cor_reads.slurm
# submit the job
sbatch combine_cor_reads.slurm
# remove unfixable reads
# SRO control
echo '#!/bin/bash --login
#SBATCH --job-name=rm_unfixable_control
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=%x.%j.log
#SBATCH --cpus-per-task=8
#SBATCH --mem=24G
#SBATCH --time=6:00:00
#SBATCH --account=a_agri_genomics
#SBATCH --partition=general
source ~/.bashrc
set -Eeo pipefail
cd $SLURM_SUBMIT_DIR
srun python ~/etc/tools/TranscriptomeAssemblyTools/utilities/FilterUncorrectabledPEfastq.py -1 SRO_control_R1.cor.fastq.gz -2 SRO_control_R2.cor.fastq.gz -s SRO_control ' > rm_unfixable_control.slurm
# submit the job
sbatch rm_unfixable_control.slurm
# SRO-QX
echo '#!/bin/bash --login
#SBATCH --job-name=rm_unfixable_qx
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=%x.%j.log
#SBATCH --cpus-per-task=8
#SBATCH --mem=24G
#SBATCH --time=6:00:00
#SBATCH --account=a_agri_genomics
#SBATCH --partition=general
source ~/.bashrc
set -Eeo pipefail
cd $SLURM_SUBMIT_DIR
srun python ~/etc/tools/TranscriptomeAssemblyTools/utilities/FilterUncorrectabledPEfastq.py -1 SRO_QX_R1.cor.fastq.gz -2 SRO_QX_R2.cor.fastq.gz -s SRO_QX' > rm_unfixable_qx.slurm
# submit the job
sbatch rm_unfixable_qx.slurm
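# sanity check (a minimal sketch): compare read counts before and after removing "unfixable" reads;
# the unfixrm_ prefix is the one added by FilterUncorrectabledPEfastq.py (4 FASTQ lines per read)
for f in SRO_*.cor.fastq.gz unfixrm_SRO_*; do
  printf '%s\t%s reads\n' "$f" $(( $(zcat -f "$f" | wc -l) / 4 ))
done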
A de novo transcriptome was assembled from the combined error-corrected reads using TransPi (Rivera-Vicéns et al. 2022). In brief, TransPi performs a complete transcriptome assembly, basic annotation and assessment pipeline using well-established methods:
TransPi needs to be installed and configured (download annotation databases and set their paths in a nextflow.config
file). Unfortunately, a lot of the configuration for TransPi to work on GU Gowonda needs to be done manually, including downloading the Singularity containers and setting up the resources required for each process - see the current configuration and the following reported issues:
- TransPi needs a large amount of working storage (it was run under /scratch/project/adna); I’ve requested additional storage on the adna folder on Bunya (now I have 10TB there).
- Some processes fail when using $TMPDIR on the compute nodes (change the flag scratch=true into scratch='/scratch/project/adna/tmp' in ~/.nextflow/bunya.config).
- Downloading the Singularity containers can fail due to $SINGULARITY_TMPDIR running out of space or not being properly configured, see this thread on GitHub.
- One of the processes fails because the variable $v is defined multiple times. I fixed it by using a printf statement instead of the multiple echo ones.
- Some processes (e.g. summary_custom_uniprot) fail due to the set -o pipefail directive (complex pipelines, including grep, etc.). It can be fixed by adding set +o pipefail to the top of the script.
- Rnammer fails - it needs to be set up to use hmmsearch2 (which can be downloaded from this link) and the --cpu 1 flag needs to be removed from core-rnammer (see details here).
- rnaQUAST fails - it requires an additional tool (GeneMark S-T) to be installed separately and put in the container's $PATH (add export PATH=\$PATH:\$HOME/etc/tools/Annotation/GeneMarkST/ at the top of the script of the rnaquast process in the main Nextflow file). See details here.
- signalp fails - the executable needs to be edited so it can find where it is being run from and to increase the sequence limit (see here), with the following command: sed -ri.bak 's|SIGNALP} = '\''.+|SIGNALP} = "\$FindBin::RealBin"|; s/BEGIN/use FindBin\; \nBEGIN/; s/MAX_ALLOWED_ENTRIES=.+/MAX_ALLOWED_ENTRIES=2000000;/' signalp
- summary_custom_uniprot fails because of convoluted table merging and file redundancies done in bash. I edited TransPi/bin/custom_uniprot_hits.R to perform these tasks and export the csv file with the results.
A modified version of TransPi with these fixes is available at https://github.com/IdoBar/TransPi.
# install TransPi
cd /home/ibar/adna/tools #Bunya
git clone https://github.com/idobar/TransPi.git # my version
bash ./TransPi/precheck_TransPi.sh $PWD/TransPi
sed -i "s/'singularity' || workflow.containerEngine == 'docker'/'singularity' || workflow.containerEngine == 'docker' || workflow.containerEngine == 'apptainer'/g" $PWD/TransPi/TransPi.nf # add support for Apptainer (it will also require NXF_VER=22.11.1-edge)
# download the pipeline containers
grep "singularity_pull_docker_container" $PWD/TransPi/TransPi.nf | cut -f 2 -d '"' | sort | uniq | tr -d \' | parallel --dry-run --rpl "{outfile} s=https://==; s=[:/]+=-=g;" aria2c -c {} -o $NXF_SINGULARITY_CACHEDIR/{outfile}.img
# Install GeneMark
mkdir -p ~/etc/tools/Annotation/GeneMarkST
cd !$
aria2c -x5 http://topaz.gatech.edu/GeneMark/tmp/GMtool_ZozF5/gmst_linux_64.tar.gz
tar xzf gmst_linux_64.tar.gz
ln -s $PWD/gmst.pl ~/bin/ # add it to the PATH
# need to apply for a licence at http://topaz.gatech.edu/GeneMark/license_download.cgi
aria2c http://topaz.gatech.edu/GeneMark/tmp/GMtool_ZozF5/gm_key_64.gz
# fix Rnammer
sed -i.bak 's/--cpu 1 //g' /home/ibar/scratch/tools/rnammer-1.2/core-rnammer
# fix signalp
sed -ri.bak 's|SIGNALP} = '\''.+|SIGNALP} = "\$FindBin::RealBin"|; s/BEGIN/use FindBin\; \nBEGIN/; s/MAX_ALLOWED_ENTRIES=.+/MAX_ALLOWED_ENTRIES=2000000;/' /home/ibar/scratch/tools/signalp-4.1/signalp
cd ~/bin
aria2c -x5 https://github.com/nextflow-io/nextflow/releases/download/v21.10.4/nextflow-21.10.4-all # this is the original version in TransPi installer - it does not support Apptainer!
chmod +x nextflow-21.10.4-all
aria2c -x5 https://github.com/nextflow-io/nextflow/releases/download/v22.11.1-edge/nextflow-22.11.1-edge-all
chmod +x nextflow-22.11.1-edge-all # this is the only version that supports DSL1, Apptainer and has an updated Tower plugin!
Test TransPi on a subset of reads from one sample
# install bbtools
mamba install -n gwas bbtools
mkdir -p ~/adna/sandbox/TransPi_test
cd !$
echo '#!/bin/bash --login
#SBATCH --job-name=downsample_reads
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=%x.%j.log
#SBATCH --cpus-per-task=8
#SBATCH --mem=24G
#SBATCH --time=6:00:00
#SBATCH --account=a_agri_genomics
#SBATCH --partition=general
source ~/.bashrc
conda activate gwas
set -Eeo pipefail
cd $SLURM_SUBMIT_DIR
srun reformat.sh in=~/adna/Oyster_QX_Disease/QX_RNA-Seq/data/NN01-561_#.fastq.gz out=NN01-561_#.downsampled.fastq.gz samplerate=0.2' > downsample_reads.slurm
# submit the job
sbatch downsample_reads.slurm
# conda activate nf21 # nextflow version 21.10.6
nextflow-22.11.1-edge-all run /home/ibar/adna/tools/TransPi/TransPi.nf -with-tower --all -profile apptainer,TransPiContainer,bunya -c /home/ibar/.nextflow/bunya.config --k 25,35 --maxReadLen 150 --pipeInstall /scratch/project/adna/tools/TransPi --allBuscos --buscoDist --reads './NN01-561_[1,2].downsampled.fastq.gz'
Run TransPi on the combined and corrected reads from each experimental group
# WORKDIR=/scratch/s2978925/aDNA/niki_qx #Gowonda
WORKDIR=/home/ibar/adna/Oyster_QX_Disease # Bunya
# mamba create -n nf-dsl1 nextflow=22.10.6 # need an older version (<22) to avoid Apptainer warnings
# export NXF_DEFAULT_DSL=1 # only needed if using nextflow version>22.03.0-edge and <22.12.0-edge
# NXF_VER=22.11.1-edge
# export TOWER_ACCESS_TOKEN=eyJ0aWQiOiA0NDYxfS5hNzkzY2M3NTI1YjZkZGFlMjVlZjJiZmIxNTU1MDAyMGE4YjNjZWQ0 # only needed if it is not already set in ~/.bashrc
# export NXF_VER=22.04.5
# prepare the folder and input files
mkdir -p $WORKDIR/SRO_TransPi_assembly/input_files && cd $WORKDIR/SRO_TransPi_assembly
ln -s $WORKDIR//QX_RNA-Seq/data/corrected_reads/unfixrm_SRO_*.gz input_files/
ln -s $WORKDIR//QX_RNA-Seq/data/combined_reads/*.fastq.gz input_files/
# start TransPi (Bunya) [optional tools and databases configured in nextflow.config]
nextflow-22.11.1-edge-all run /home/ibar/adna/tools/TransPi/TransPi.nf -with-tower --all -profile apptainer,TransPiContainer,bunya -c /home/ibar/.nextflow/bunya.config --k 25,35,41,53,65,71,83 --maxReadLen 150 --pipeInstall /scratch/project/adna/tools/TransPi --allBuscos --buscoDist --reads './input_files/*_R[1,2]*.fastq.gz' -resume
# start the run (Gowonda)
# nextflow run $WORKDIR/TransPi/TransPi.nf -with-tower --all --reads './input_files/*_R[1,2]*.fastq.gz' -profile singularity,TransPiContainer,gowonda -c $HOME/.nextflow/GU.config --rRNAfilter --rRNAdb $WORKDIR/TransPi/DBs/rrna_db/SILVA_138.1_Small_and_Large_SURef_NR99_tax_silva_trunc.fasta --withSignalP --signalp $HOME/etc/tools/Annotation/signalp-4.1/signalp --withTMHMM --tmhmm $HOME/etc/tools/Annotation/tmhmm-2.0c/bin/tmhmm --withRnammer --rnam $HOME/etc/tools/Annotation/rnammer-1.2/rnammer --k 25,35,41,53,65,71,83 --maxReadLen 150 --pipeInstall $WORKDIR/TransPi --allBuscos --buscoDist -resume
Run the EvidentialGene step of TransPi to collapse redundant transcripts
JOBNAME="onlyEvigene"
NCORES=4
MEM=24
WALLTIME=20:00:00
mkdir -p $WORKDIR/SRO_TransPi_assembly/Evigene_analysis/
cd !$
ls -1 $WORKDIR/SRO_TransPi_assembly/results/assemblies/*SOAP.fa | parallel --dry-run --rpl "{ass} s:.+/(.+).SOAP.fa:\1:" --rpl "{ass_dir} s:.+/(.+).SOAP.fa:$WORKDIR/SRO_TransPi_assembly/Evigene_analysis/\1:" "source ~/.bash_profile; mkdir -p {ass_dir}/onlyEvi && cd {ass_dir}; ln -s $WORKDIR/SRO_TransPi_assembly/results/assemblies/{ass}*[!k0-9].fa {ass_dir}/onlyEvi/; nextflow-22.11.1-edge-all run /home/ibar/adna/tools/TransPi/TransPi.nf -name {ass}_evigene -with-tower --onlyEvi --outdir {ass}_evigene_results -profile apptainer,TransPiContainer,bunya -c /home/ibar/.nextflow/bunya.config" > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,PATH=$PATH,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=base ${WORKDIR}/array.slurm
# resume (if failed)
ls -1 $WORKDIR/SRO_TransPi_assembly/results/assemblies/*SOAP.fa | parallel --dry-run --rpl "{ass} s:.+/(.+).SOAP.fa:\1:" "source ~/.bash_profile; cd $WORKDIR/SRO_TransPi_assembly/Evigene_analysis/{ass}; nextflow-22.11.1-edge-all run /home/ibar/adna/tools/TransPi/TransPi.nf -name {ass}_evigene -with-tower --onlyEvi --outdir {ass}_evigene_results -profile apptainer,TransPiContainer,bunya -c /home/ibar/.nextflow/bunya.config -resume" > $JOBNAME.resume.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.resume.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.resume.cmds,CONDA_NAME=base ${WORKDIR}/array.slurm
The assemblies were annotated with TransPi, using homology searches with BLAST (Camacho et al. 2009) and DIAMOND (Buchfink et al. 2015) against the Uniprot_Sprot protein database, as well as an HMM search against the Pfam database of protein families. In addition, signal peptides, ribosomal RNA genes and transmembrane topology were predicted with SignalP v4.1 (Petersen et al. 2011), Rnammer and TMHMM (Krogh et al. 2001), respectively (Emanuelsson et al. 2007).
WORKDIR=/home/ibar/adna/Oyster_QX_Disease # Bunya
mkdir -p $WORKDIR/SRO_TransPi_assembly/Annotation/onlyAnn
ln -s $WORKDIR/SRO_TransPi_assembly/results/assemblies/*Trinity.fa $WORKDIR/SRO_TransPi_assembly/Annotation/onlyAnn/
cd $WORKDIR/SRO_TransPi_assembly/Annotation
nextflow-22.11.1-edge-all run /scratch/project/adna/tools/TransPi/TransPi.nf -with-tower --onlyAnn --outdir Annotation_results -profile apptainer,TransPiContainer,bunya -c /home/ibar/.nextflow/bunya.config --withSignalP --withTMHMM --withRnammer
# create gff and gtf files from selected assemblies
JOBNAME="gtf2gff"
NCORES=4
MEM=24
WALLTIME=20:00:00
cd $WORKDIR/SRO_TransPi_assembly/results/assemblies
find $WORKDIR/SRO_TransPi_assembly/results/assemblies -name "*Trinity.fa" | parallel --dry-run "perl ~/etc/tools/trinityrnaseq-v2.11.0/util/misc/cdna_fasta_file_to_transcript_gtf.pl {} > {.}.gtf; agat_convert_sp_gxf2gxf.pl -g {.}.gtf -o {.}.gff" > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,PATH=$PATH,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=genomics ${WORKDIR}/array.slurm
rclone copy -P --dry-run --include "*Trinity*" $WORKDIR/SRO_TransPi_assembly/results/assemblies GURS_shared:Oyster_QX_Disease/QX_RNA-Seq/SRO_TransPi_assembly/results/assemblies
In addition to the annotation performed with TransPi, the transcriptomes were annotated against the non-redundant nucleotide and protein databases of the NCBI (nt and nr, respectively) to achieve more accurate species-specific annotations.
CONDA_NAME=genomics
mamba install -n $CONDA_NAME gsutil awscli blast
# download NCBI databases
mkdir -p ~/adna/tools/ncbi_db
cd ~/adna/tools/ncbi_db
WORKDIR=/scratch/project/adna/Oyster_QX_Disease
JOBNAME="update_blast_db"
NCORES=8
MEM=16
WALLTIME=20:00:00
parallel --dry-run "update_blastdb.pl --source ncbi --decompress --force --blastdb_version 5 --num_threads 0 {}" ::: nt nr taxdb > $JOBNAME.cmds
# or do it manually
# curl -s -l ftp://ftp.ncbi.nlm.nih.gov/blast/db/v5/ | grep -E "^nr." | parallel --dry-run aria2c -x5 -c --auto-file-renaming=false https://ftp.ncbi.nlm.nih.gov/blast/db/v5/{} > download_blast_nr.cmds
# Prepare a general array Slurm script
echo '#!/bin/bash --login
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=%x.%A.%a.log'"
#SBATCH --account=a_agri_genomics
#SBATCH --partition=general
set -Eeo pipefail
source ~/.bashrc
conda activate \$CONDA_NAME
cd \$SLURM_SUBMIT_DIR
srun gawk -v ARRAY_IND=\$SLURM_ARRAY_TASK_ID 'NR==ARRAY_IND' \$CMDS_FILE | bash" > ${WORKDIR}/array.slurm
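# as with array.pbspro above, each Slurm array task runs a single line of $CMDS_FILE (selected by $SLURM_ARRAY_TASK_ID)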
# send to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME ${WORKDIR}/array.slurm
# Blastn - this takes too long, see using blast-nf below
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
cd $WORKDIR/SRO_TransPi_assembly/Annotation
mkdir -p $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/blastn_tax
JOBNAME="SRO_trans_blastn"
NCORES=24
MEM=96
WALLTIME=150:00:00
ls -1 $WORKDIR/SRO_TransPi_assembly/results/assemblies/*Trinity.fa | parallel --dry-run "blastn -task blastn -db /scratch/project/adna/tools/ncbi_db/nt -query {} -outfmt '6 std stitle staxids ssciname scomname' -evalue 1e-10 -max_target_seqs 20 -num_threads \$SLURM_CPUS_PER_TASK -out Annotation_results/blastn_tax/{/.}.nt_tax.outfmt6" > $JOBNAME.cmds
# submit the commands in the login node
bash $JOBNAME.cmds
# or send to the cluster as a job array
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME --dependency=afterok:8070286 ${WORKDIR}/array.slurm
# annotate proteins - this takes too long, see using blast-nf below
JOBNAME="SRO_trans_blastp"
ls -1 $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/transdecoder/*.transdecoder.pep | parallel --dry-run "blastp -task blastp -db /scratch/project/adna/tools/ncbi_db/nr -query {} -outfmt '6 std stitle staxids ssciname scomname' -evalue 1e-10 -max_target_seqs 20 -num_threads \$SLURM_CPUS_PER_TASK -out Annotation_results/blastp_tax/{/.}.nr_tax.outfmt6" > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME --dependency=afterok:8070286 ${WORKDIR}/array.slurm
Regular BLAST is just too slow (mainly at the protein level), so we used a Nextflow implementation of BLAST (v2.16.0) that breaks the input transcriptome into “chunks” and processes them in parallel on the HPC cluster.
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
cd $WORKDIR/SRO_TransPi_assembly/Annotation
JOBNAME=blastn_tax_nf
NCORES=4
MEM=24
WALLTIME=150:00:00
mkdir -p $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/$JOBNAME
ls -1 $WORKDIR/SRO_TransPi_assembly/results/assemblies/*Trinity.fa | parallel --dry-run --rpl "{sample} s:.+/(.+)\.Trinity.+:\1:" "~/bin/nextflow-22.11.1-edge-all run ~/etc/tools/blast-nf/main.nf --app blastn --dbDir /scratch/project/adna/tools/ncbi_db --dbName nt --query {} --outfmt \"'6 std stitle staxids ssciname scomname'\" --options '-evalue 1e-10 -max_target_seqs 20' --chunkSize 2500 --outdir $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/$JOBNAME/{sample} --outfileName {/.}.nt_tax.outfmt6 -c ~/.nextflow/bunya.config -profile bunya,apptainer -with-tower" > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=base ${WORKDIR}/array.slurm
JOBNAME=blastp_tax_nf
mkdir -p $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/$JOBNAME
ls -1 $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/transdecoder/*.transdecoder.pep | parallel --dry-run --rpl "{sample} s:.+/(.+)\.Trinity.+:\1:" "~/bin/nextflow-22.11.1-edge-all run ~/etc/tools/blast-nf/main.nf --app blastp --dbDir /scratch/project/adna/tools/ncbi_db --dbName nr --query {} --outfmt \"'6 std stitle staxids ssciname scomname'\" --options '-evalue 1e-10 -max_target_seqs 20' --chunkSize 500 --outdir $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/$JOBNAME/{sample} --outfileName {/}.nr_tax.outfmt6 -c ~/.nextflow/bunya.config -profile bunya,apptainer -with-tower" > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=base ${WORKDIR}/array.slurm
Even when using a split-combine strategy with blast-nf, BLASTp is just too slow (it takes around 50 hours to process a chunk of 500 proteins). Therefore we also used DIAMOND, which can be up to 1,000 times faster than BLAST (Buchfink et al. 2015).
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
cd /scratch/project/adna/tools
JOBNAME=prep_diamond_db
NCORES=8
MEM=48
WALLTIME=10:00:00
DB_PATH="/scratch/project/adna/tools/ncbi_db"
DB="nr"
# prepare script
echo '#!/bin/bash --login
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=%x.%A.%a.log'"
#SBATCH --account=a_agri_genomics
#SBATCH --partition=general
set -Eeo pipefail
source ~/.bashrc
conda activate \$CONDA_NAME
cd \$SLURM_SUBMIT_DIR
blastdbcmd -entry all -db ${DB_PATH}/${DB} -out ${DB_PATH}/${DB}.fasta
printf \"Finished extracting sequences from ${DB} database as '${DB}.fasta'\n\"
aria2c -x5 -c --auto-file-renaming=false -d ${DB_PATH} https://ftp.ncbi.nlm.nih.gov/pub/taxonomy/accession2taxid/prot.accession2taxid.FULL.gz
aria2c -x5 -c --auto-file-renaming=false -d ${DB_PATH} https://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdmp.zip
cd ${DB_PATH} && unzip taxdmp.zip
gzip -v -t ${DB_PATH}/prot.accession2taxid.FULL.gz
printf \"\$(date) - Finished validating 'prot.accession2taxid.FULL.gz'.\n\"
diamond makedb -p \$SLURM_CPUS_PER_TASK --in ${DB_PATH}/${DB}.fasta -d ${DB_PATH}/${DB} --taxonmap ${DB_PATH}/prot.accession2taxid.FULL.gz --taxonnodes ${DB_PATH}/nodes.dmp --taxonnames ${DB_PATH}/names.dmp
printf \"\$(date) - Finished preparing Diamond database '${DB}.dmnd'. \n\"
rm ${DB_PATH}/${DB}.fasta
printf \"\$(date) - Removed '${DB}.fasta', script completed. \n\"" > $JOBNAME.slurm
# send the job to the cluster
sbatch --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CONDA_NAME=genomics $JOBNAME.slurm
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
JOBNAME=prot_diamond_tax
cd $WORKDIR/SRO_TransPi_assembly/Annotation
mkdir -p $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/$JOBNAME
NCORES=12
MEM=96
WALLTIME=50:00:00
ls -1 $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/transdecoder/*.transdecoder.pep | parallel --dry-run --rpl "{sample} s:.+/(.+)\.Trinity.+:\1:" "apptainer run docker://buchfink/diamond:latest blastp -d ~/adna/tools/ncbi_db/nr.dmnd -q {} -p \$SLURM_CPUS_PER_TASK -f 6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore stitle staxids sscinames skingdoms sphylums --sensitive -k 20 -e 0.00001 > $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/$JOBNAME/{sample}.diamond_blastp.nr_tax.outfmt6" > $JOBNAME.cmds
# run within an interactive job
grep "unfixrm_SRO_control" $JOBNAME.cmds | bash
# submit the job array to the cluster
sbatch -a 1-2,4 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=base ${WORKDIR}/array.slurm
# each job took roughly 3:30 hours
Run DIAMOND v2.1.9 through nf-blast, a Nextflow-enabled implementation (reducing the time to ~1.5 h per transcriptome)
start_long_interactive_job # start interactive job on Bunya
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
JOBNAME=nf_diamond_blastp_tax
mkdir -p $WORKDIR/SRO_TransPi_assembly/Annotation/$JOBNAME
cd $WORKDIR/SRO_TransPi_assembly/Annotation/$JOBNAME
# mkdir -p $WORKDIR/Annotation/Annotation_results/$JOBNAME
ls -1 $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/transdecoder/*.transdecoder.pep | parallel --dry-run "~/bin/nextflow-22.11.1-edge-all run ~/adna/tools/nf-blast/nf-blast.nf -profile bunya,apptainer,diamond_tax --query {} --app 'diamond blastp' --db ~/adna/tools/ncbi_db/nr --diamondOpts '--very-sensitive -e 1e-10 -k 20' --chunkSize 10000 --outDir $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/$JOBNAME --out {/}.dmnd_blastp.nr_tax.outfmt6 -c ~/.nextflow/bunya.config -with-tower" > $JOBNAME.cmds
# run SRO_control interactively
grep "unfixrm_SRO_control" $JOBNAME.cmds | bash
# or submit all the commands as a job array to the cluster
NCORES=4
MEM=24
WALLTIME=250:00:00
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=base ${WORKDIR}/array.slurm
The predicted proteins in the transcriptomes were further annotated using InterProScan v5.66-98.0 (Jones et al. 2014; Mitchell et al. 2019) to assign protein families, motifs and ontologies, assisting with transcript-to-gene annotation (an alternative approach is the FA-nf pipeline). Note the issues mentioned above with SignalP and TMHMM, and a new one for Phobius (see discussions and solutions on GitHub and Biostars).
cd ~/adna/tools
start_interactive_job # start an interactive job to download interproscan
IPSCAN_VERSION=5.66-98.0
IPRO_DATA=interproscan-data-${IPSCAN_VERSION}.tar.gz
# http://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/5.66-98.0/alt/interproscan-data-5.66-98.0.tar.gz
DATA_URL="https://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/${IPSCAN_VERSION}/alt/${IPRO_DATA}"
aria2c -x 6 -c $DATA_URL && \
aria2c -c $DATA_URL.md5 && \
md5sum -c ${IPRO_DATA}.md5 && pigz -dc $IPRO_DATA | tar xf -
# download the interproscan container
apptainer pull docker://index.docker.io/interpro/interproscan:${IPSCAN_VERSION} --dir $NXF_APPTAINER_CACHEDIR
# create a link in SINGULARITY_CACHEDIR
ln -s interproscan_${IPSCAN_VERSION}.sif $NXF_APPTAINER_CACHEDIR/
# download the FA-nf pipeline
git clone --recursive https://github.com/guigolab/FA-nf
# copy external software and containers from Research Space
rclone copy -P GURS_shared:Oyster_QX_Disease/FA-nf/containers FA-nf/containers
cd FA-nf/containers/interproscan && tar xzf external_tools.tar.gz && cd -
# ls -1 FA-nf/containers/interproscan/external/tmhmm-2.0c/bin/* | parallel chmod +x {}
# wget https://github.com/chefnb/ParseTargs/raw/master/tmhmm-2.0c/bin/decodeanhmm -O FA-nf/containers/interproscan/external/phobius/decodeanhmm
# chmod +x FA-nf/containers/interproscan/external/signalp-4.1/signalp FA-nf/containers/interproscan/external/phobius/decodeanhmm FA-nf/containers/interproscan/external/phobius/phobius.pl
# copy user-defined settings
cp -r FA-nf/containers/interproscan/.interproscan-5 ~/
# edit path to external programs and databases
# nano ~/.interproscan-5/interproscan.properties
( echo "data.directory=$PWD/interproscan-${IPSCAN_VERSION}/data"; sed -r "s|=.+/FA-nf/containers/interproscan/external|=/home/ibar/scratch/tools|g" $PWD/FA-nf/containers/interproscan/.interproscan-5/interproscan.properties ) > ~/.interproscan-5/interproscan.properties
# sed -i "s|/home/ibar/etc|$PWD|g" ~/.interproscan-5/interproscan.properties
# echo "data.directory=$PWD/interproscan-${IPSCAN_VERSION}/data" >> ~/.interproscan-5/interproscan.properties
# test that interproscan is working
start_interactive_job # make sure it is run on a compute node
apptainer exec FA-nf/containers/interproscan/interproscan_${IPSCAN_VERSION}.sif /opt/interproscan/interproscan.sh
# press the HMM databases
apptainer exec -B $PWD/interproscan-${IPSCAN_VERSION}/data:/opt/interproscan/data $PWD/FA-nf/containers/interproscan/interproscan_${IPSCAN_VERSION}.sif /bin/sh -c "cd /opt/interproscan/; python3 setup.py -f interproscan.properties"
# if the code above isn't working then do it interactively
apptainer shell -B $PWD/interproscan-${IPSCAN_VERSION}/data:/opt/interproscan/data $PWD/FA-nf/containers/interproscan/interproscan_${IPSCAN_VERSION}.sif
cd /opt/interproscan/; python3 setup.py -f interproscan.properties
# annotate proteins
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
cd $WORKDIR/SRO_TransPi_assembly/Annotation
mkdir -p $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/ipscan
JOBNAME="SRO_prot_ipscan"
# prepare the commands (don't forget to remove the asterisk at the end of the proteins!)
ls -1 $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/transdecoder/*.transdecoder.pep | parallel --dry-run "sed 's/[*]//g' {} > \$TMPDIR/{/.}.tmp ; apptainer exec -B /home/ibar/scratch/tools -B $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/ipscan:/output -B \$TMPDIR:/temp $NXF_SINGULARITY_CACHEDIR/interproscan_${IPSCAN_VERSION}.sif /opt/interproscan/interproscan.sh -i /temp/{/.}.tmp -d /output -pa -dp -goterms -f TSV -T /temp -cpu \$SLURM_CPUS_PER_TASK ; gawk '\$4~/PANTHER/' $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/ipscan/{/.}.tsv > $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/ipscan/{/.}.panther.tsv" > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME ${WORKDIR}/array.slurm
cd $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/ipscan
# remove the tmp from the filenames
rename -v 's/.tmp//' *
# extract PANTHER results
# ls -1 *.tsv | parallel "gawk '\$4~/PANTHER/' {} > {.}.panther.tsv"
# send results back to Research Space
rclone copy -P $WORKDIR/SRO_TransPi_assembly/Annotation/Annotation_results/ipscan GURS_shared:Oyster_QX_Disease/QX_RNA-Seq/SRO_TransPi_assembly/Annotation_results/ipscan
These annotation tables were added to the Trinotate database of the assemblies.
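A minimal sketch of loading one of these custom tables into a Trinotate SQLite database is shown below (assuming Trinotate v3.x command-line conventions; the database and file names are placeholders):
# Trinotate's loader expects the standard 12-column outfmt6, so the extra taxonomy columns are dropped first
cut -f1-12 unfixrm_SRO_control.dmnd_blastp.nr_tax.outfmt6 > nr_blastp.outfmt6
Trinotate Trinotate.sqlite LOAD_custom_blast --outfmt6 nr_blastp.outfmt6 --prog blastp --dbtype nr
# regenerate the annotation report so it includes the custom hits
Trinotate Trinotate.sqlite report > trinotate_annotation_report.xls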
The transcriptome assemblies were assessed and compared using rnaQUAST v2.2.3 (Bushmanova et al. 2016), TransRate v1.0.3 (Smith-Unna et al. 2016) and DETONATE v1.11 (Li et al. 2014), as suggested in the Trinity documentation and by Behera et al. (2021).
The transcripts were compared to the Pacific Oyster (Crassostrea gigas) proteins, which were obtained from NCBI (accession GCA_902806645.1) (Peñaloza et al. 2021).
CONDA_NAME=rnaseq
mamba create -n $CONDA_NAME transrate ncbi-datasets-cli detonate
WORKDIR='/home/ibar/adna/Oyster_QX_Disease'
conda activate $CONDA_NAME
mkdir -p $WORKDIR/references && cd $WORKDIR/references
datasets download genome accession GCF_902806645.1 --include gff3,rna,cds,protein,genome --filename GCF_902806645.1.zip
unzip GCF_902806645.1.zip
REF_PROT=$WORKDIR/references/ncbi_dataset/data/GCF_902806645.1/protein.faa
# $WORKDIR/SRO_TransPi_assembly/results/assemblies
# install the ORP version of transrate
cd ~/etc/tools
wget https://github.com/macmanes-lab/Oyster_River_Protocol/raw/master/software/orp-transrate.tar.gz
tar xzf orp-transrate.tar.gz
# or install the ORP container
apptainer pull docker://macmaneslab/orp:2.3.3 --dir $NXF_APPTAINER_CACHEDIR
cd $WORKDIR/SRO_TransPi_assembly/
NCORES=24
MEM=96
WALLTIME=100:00:00
#mkdir -p $WORKDIR/SRO_TransPi_assembly/results/transrate
# cp /home/ibar/adna/Oyster_QX_Disease/SRO_TransPi_assembly/work/27/72b07a2131e97971f644da74fc9dc6/unfixrm_SRO_QX_no_rRNA.R1.fq /home/ibar/adna/Oyster_QX_Disease/SRO_TransPi_assembly/work/2b/f20013a2ae897e646817eac5d0997c/SRO_QX_no_rRNA.R1.fq /home/ibar/adna/Oyster_QX_Disease/SRO_TransPi_assembly/work/e5/25ba5d1510380c3631ee6c18bf2e89/SRO_control_no_rRNA.R1.fq /home/ibar/adna/Oyster_QX_Disease/SRO_TransPi_assembly/work/81/5bd2ffe863bf0ee37388694b4ad254/unfixrm_SRO_control_no_rRNA.R1.fq $WORKDIR/SRO_TransPi_assembly/input_files/
# use no_rRNA.R1.fq files as input
JOBNAME="transrate_no_rRNA"
OUTDIR=$WORKDIR/SRO_TransPi_assembly/results/$JOBNAME # or OUTDIR=~/scratch/sandbox/SRO_TransPi_assembly/$JOBNAME
mkdir -p $OUTDIR
parallel --dry-run --rpl "{sample} s:.+/(.+)_no_rRNA.R1.fq:\1:" --rpl "{read2} s:.R1:.R2:" "mkdir -p $OUTDIR/{sample}; ASSEMBLIES=\$(ls -1 $WORKDIR/SRO_TransPi_assembly/results/assemblies/{sample}*[!k0-9].fa | gawk -v ORS=\",\" '1' | sed 's/,$//'); $HOME/etc/tools/orp-transrate/transrate --assembly \$ASSEMBLIES --left {} --right {read2} --threads \$SLURM_CPUS_PER_TASK --reference $REF_PROT --output $OUTDIR/{sample}" ::: $(ls -1 $WORKDIR/SRO_TransPi_assembly/input_files/*SRO*no_rRNA.R1.fq) > $JOBNAME.cmds
# --merge-assemblies {sample}.merged.fa
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME ${WORKDIR}/array.slurm
# use normalised (.norm.fq) files as input
JOBNAME="transrate_norm"
OUTDIR=$WORKDIR/SRO_TransPi_assembly/results/$JOBNAME # or OUTDIR=$HOME/scratch/sandbox/SRO_TransPi_assembly/$JOBNAME
mkdir -p $OUTDIR
parallel --dry-run --rpl "{sample} s:.+/\w+-(.+).norm.fq:\1:" --rpl "{read2} s:left-:right-:" "mkdir -p $OUTDIR/{sample}; ASSEMBLIES=\$(ls -1 $WORKDIR/SRO_TransPi_assembly/results/assemblies/{sample}*[!k0-9].fa | gawk -v ORS=\",\" '1' | sed 's/,$//'); $HOME/etc/tools/orp-transrate/transrate --assembly \$ASSEMBLIES --left {} --right {read2} --threads \$SLURM_CPUS_PER_TASK --reference $REF_PROT --output $OUTDIR/{sample}" ::: $(find $WORKDIR/SRO_TransPi_assembly/work -type f -name "left-*SRO*.norm.fq") > $JOBNAME.cmds
# --merge-assemblies {sample}.merged.fa
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME ${WORKDIR}/array.slurm
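# DETONATE (installed above) can be scored on the same inputs via its rsem-eval module; a minimal sketch,
# assuming a mean fragment length of 300 bp (file names follow the input_files/assemblies used above):
# rsem-eval-calculate-score --paired-end -p $NCORES \
#   $WORKDIR/SRO_TransPi_assembly/input_files/unfixrm_SRO_control_no_rRNA.R1.fq \
#   $WORKDIR/SRO_TransPi_assembly/input_files/unfixrm_SRO_control_no_rRNA.R2.fq \
#   $WORKDIR/SRO_TransPi_assembly/results/assemblies/unfixrm_SRO_control.Trinity.fa \
#   unfixrm_SRO_control_rsem_eval 300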
The chosen most representative transcriptome assembly for SRO (Rcorrected SRO non-infected TransPi assembly) was used as reference to quantify gene/transcript abundance using Salmon (Patro et al. 2017; Love et al. 2018).
CONDA_NAME=genomics
mamba install -n $CONDA_NAME kallisto salmon
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
mkdir -p $WORKDIR/SRO_TransPi_assembly/Gene_expression
cd !$
JOBNAME="Salmon_index"
NCORES=12
MEM=64
WALLTIME=10:00:00
parallel --dry-run "salmon index -t {} -i {/.}.index" ::: $(ls -1 $WORKDIR/SRO_TransPi_assembly/results/assemblies/*Trinity.fa ) > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME ${WORKDIR}/array.slurm
DATE=`date +%d_%m_%Y`
JOBNAME="Salmon_quant_$DATE"
RUNDIR=$WORKDIR/SRO_TransPi_assembly/Gene_expression/$JOBNAME
mkdir -p $RUNDIR && cd $RUNDIR
parallel --dry-run --rpl "{sample} s:.+/(.+)_1.fastq.gz:\1:" --rpl "{read2} s:_1:_2:" "salmon quant -i $WORKDIR/SRO_TransPi_assembly/Gene_expression/unfixrm_SRO_control.Trinity.index -l A -1 {} -2 {read2} -o {sample}_salmon_eq --numBootstraps 500 --useVBOpt --seqBias --dumpEq -p \$SLURM_CPUS_PER_TASK --validateMappings" ::: $(ls -1 $WORKDIR/QX_RNA-Seq/data/NN01-5*_1.fastq.gz ) > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1-$(cat $JOBNAME.cmds | wc -l)%10 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME ${WORKDIR}/array.slurm
Expression-based clustering of transcripts was performed with RapClust (Srivastava et al. 2016).
CONDA_NAME=genomics
conda activate $CONDA_NAME
mamba install mcl # need to install mcl
pip install git+https://github.com/IdoBar/RapClust@patch-1
# mamba install -n $CONDA_NAME kallisto salmon rapclust
WORKDIR=/home/ibar/adna/Oyster_QX_Disease
cd $WORKDIR/SRO_TransPi_assembly/Gene_expression/Salmon_quant_14_06_2024
# unzip the eq_classes files
find . -name eq_classes.txt.gz | parallel "gzip -cd {} > {.}"
JOBNAME="SRO_rapclust"
NCORES=12
MEM=64
WALLTIME=10:00:00
# prepare config file for RapClust
find . -maxdepth 1 -type d -name "NN01-*salmon_eq" | sort | gawk -F "/" 'BEGIN{printf "conditions:\n - SRO_Control\n - SRO_QX\nsamples:\n SRO_Control:"}{printf "\n - %s", $2}NR==3{printf "\n SRO_QX:"}END{printf "\noutdir: SRO_rapclust\n"}' > $JOBNAME.yaml
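# the generated SRO_rapclust.yaml is expected to look like this (sample folders from the Salmon runs above):
# conditions:
#  - SRO_Control
#  - SRO_QX
# samples:
#  SRO_Control:
#  - NN01-561_salmon_eq
#  - NN01-562_salmon_eq
#  - NN01-563_salmon_eq
#  SRO_QX:
#  - NN01-567_salmon_eq
#  - NN01-579_salmon_eq
#  - NN01-580_salmon_eq
# outdir: SRO_rapclust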
# prepare SLURM script
echo "RapClust --config $JOBNAME.yaml" > $JOBNAME.cmds
# submit the job array to the cluster
sbatch -a 1 --job-name=$JOBNAME --cpus-per-task=$NCORES --mem=${MEM}G --time=$WALLTIME --export=ALL,CMDS_FILE=$JOBNAME.cmds,CONDA_NAME=$CONDA_NAME ${WORKDIR}/array.slurm
This document was last updated at 2025-01-15 00:10:00 using R Markdown (built with R version 4.2.1 (2022-06-23 ucrt)). The source code for this webpage can be found at https://github.com/IdoBar/QX_bioinfo_analysis.
Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. It is especially powerful for authoring documents and reports that include code, as the code can be executed and its results embedded in the output. For more details on using R Markdown see http://rmarkdown.rstudio.com and the R Markdown cheatsheet.