diff --git a/rsfMRI/S01_AnatPrep_RR.sh b/rsfMRI/S01_AnatPrep_RR.sh
new file mode 100755
index 0000000..b8d0327
--- /dev/null
+++ b/rsfMRI/S01_AnatPrep_RR.sh
@@ -0,0 +1,128 @@
+# ===============================================================
+# Date: 11/12/2015
+# Authors: Javier Gonzalez-Castillo
+#
+# Inputs:
+#    * SubjectID is the only input parameter
+#
+# Outputs:
+#    * Skull-strip PD
+#    * Intracranial mask based on PD
+#    * Create Skull Stripped and Bias Corrected MP-RAGE
+#    * Perform transformation to MNI space
+#
+# ===============================================================
+
+# COMMON STUFF
+# ============
+source ./00_CommonVariables.sh
+set -e
+minp=11      # Minimum patch size as recommended by Bob for the NL Alignment
+usePD4mask=0 # Use PD for computing intra-cranial mask (helps keep the TLs)
+
+# READ INPUT PARAMETERS
+# =====================
+# INPUT VARIABLES REQUIRED
+# ========================
+if [[ -z "${SBJ}" ]]; then
+   echo "You need to provide SBJ as an environment variable"
+   exit 1
+fi
+### if [ $# -ne 1 ]; then
+###    echo "Usage: $(basename $0) SBJID"
+###    exit
+### fi
+### SBJ=$1
+
+
+# CREATE DIRECTORIES AND COPY FILES
+# =================================
+cd ${PRJDIR}/PrcsData/${SBJ}
+echo -e "\033[0;33m++ INFO: Creating D01_Anatomical Directory and linking necessary files.\033[0m"
+if [ ! -d D01_Anatomical ]; then mkdir D01_Anatomical; fi
+cd D01_Anatomical
+ln -fvs ${PRJDIR}/Scripts/MNI152_T1_2009c_uni+tlrc.* .
+ln -fvs ../D00_OriginalData/${SBJ}_t1_memprage*.nii.gz .
+
+# COMBINE THE DIFFERENT ECHOES
+# ============================
+echo -e "\033[0;33m++ INFO: Averaging all ME-MEMPRAGE datasets into ${SBJ}_Anat+orig.\033[0m"
+3dMean -overwrite -prefix ${SBJ}_Anat ${SBJ}_t1_memprage.nii.gz
+# CREATE INTRACRANIAL MASK
+# ========================
+echo -e "\033[0;33m++ INFO: Intensity Bias Correction for Anatomical scan.\033[0m"
+3dUnifize -overwrite -prefix ${SBJ}_Anat_bc -input ${SBJ}_Anat+orig. -GM -ssave ${SBJ}_Anat.UN.Bias
+
+if [ "${noisyANAT}" -eq "1" ]; then
+   if [ "${usePD4mask}" -eq "1" ]; then
+      echo -e "\033[0;33m++ INFO: Skull-stripping via noisy approach [BASED ON PD]\033[0m"
+      @NoisySkullStrip -input ${SBJ}_Anat_PD_bc+orig
+      3dcalc -a ${SBJ}_Anat_PD_bc.ns+orig -expr 'step(a)' -overwrite -prefix ${SBJ}_Anat_mask
+      rm ${SBJ}_Anat_PD_bc.ns+orig.*
+      rm ${SBJ}_Anat_PD_bc.skl+orig.*
+      rm ${SBJ}_Anat_PD_bc.nsm+orig.*
+      rm ${SBJ}_Anat_PD_bc.ma+orig.*
+      rm ${SBJ}_Anat_PD_bc.lsp+orig.*
+      rm ${SBJ}_Anat_PD_bc.ls+orig.*
+      rm ${SBJ}_Anat_PD_bc.air+orig.*
+      rm __MA_h.1D
+      rm __MA_hd.1D
+   else
+      echo -e "\033[0;33m++ INFO: Skull-stripping via noisy approach [BASED ON MPRAGE]\033[0m"
+      @NoisySkullStrip -input ${SBJ}_Anat_bc+orig
+      3dcalc -a ${SBJ}_Anat_bc.ns+orig -expr 'step(a)' -overwrite -prefix ${SBJ}_Anat_mask
+      rm ${SBJ}_Anat_bc.ns+orig.*
+      rm ${SBJ}_Anat_bc.skl+orig.*
+      rm ${SBJ}_Anat_bc.nsm+orig.*
+      rm ${SBJ}_Anat_bc.ma+orig.*
+      rm ${SBJ}_Anat_bc.lsp+orig.*
+      rm ${SBJ}_Anat_bc.ls+orig.*
+      rm ${SBJ}_Anat_bc.air+orig.*
+      rm __MA_h.1D
+   fi
+else
+   echo -e "\033[0;32m++ INFO: Skull-stripping via regular approach \033[0m"
+   3dSkullStrip -prefix ${SBJ}_Anat_mask -ld 33 -niter 777 -shrink_fac_bot_lim 0.777 -exp_frac 0.0666 -input ${SBJ}_Anat_bc+orig
+   3dcalc -a ${SBJ}_Anat_mask+orig. -expr 'step(a)' -overwrite -prefix ${SBJ}_Anat_mask
+fi
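+
+# Minimal QC sketch (an addition, not part of the original pipeline): report
+# the intracranial mask volume so gross skull-stripping failures are easy to
+# spot. 3dBrickStat is standard AFNI; what counts as a "reasonable" voxel
+# count is protocol-dependent and left to the reader.
+nvox=`3dBrickStat -count -non-zero ${SBJ}_Anat_mask+orig`
+echo -e "\033[0;33m++ INFO: Intracranial mask contains ${nvox} non-zero voxels.\033[0m"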
+
+###echo -e "\033[0;33m++ MANUAL INTERVENTION NEEDED: Check that the mask is ok, and correct when necessary [${SBJ}_Anat_mask] \033[0m"
+###read -n1 -rsp $'Press ENTER to continue:'
+3dcalc -overwrite -a ${SBJ}_Anat_bc+orig -m ${SBJ}_Anat_mask+orig -expr 'a*m' -prefix ${SBJ}_Anat_bc_ns
+# CONVERT ANATOMICAL TO MNI SPACE
+# ===============================
+if [ "${doNL}" -eq "1" ]; then
+   echo -e "\033[0;33m++ INFO: Alignment to MNI space will be via non-linear registration \033[0m"
+   3dQwarp -overwrite                \
+           -allineate                \
+           -blur -3 -3 -iwarp -duplo \
+           -workhard:0:4             \
+           -noneg                    \
+           -pblur                    \
+           -minpatch ${minp}         \
+           -base MNI152_T1_2009c_uni+tlrc \
+           -source ${SBJ}_Anat_bc_ns+orig \
+           -prefix ${SBJ}_Anat_bc_ns
+
+else
+   echo -e "\033[0;33m++ INFO: Alignment to MNI space will be via linear registration \033[0m"
+   @auto_tlrc -overwrite -base MNI152_T1_2009c_uni+tlrc -input ${SBJ}_Anat_bc_ns+orig. -no_ss -twopass
+   cat_matvec -ONELINE ${SBJ}_Anat_bc_ns+tlrc::WARP_DATA > ${SBJ}_MNI2Anat.Xaff12.1D
+   cat_matvec -ONELINE ${SBJ}_MNI2Anat.Xaff12.1D -I      > ${SBJ}_Anat2MNI.Xaff12.1D
+   rm ${SBJ}_Anat_bc_ns_WarpDrive.log
+   rm ${SBJ}_Anat_bc_ns.Xaff12.1D
+   rm ${SBJ}_Anat_bc_ns.Xat.1D
+fi
+
+# Create Autobox version for visualization
+# ========================================
+3dAutobox -overwrite -input ${SBJ}_Anat_bc_ns+tlrc -prefix ${SBJ}_Anat_bc_ns.AB
+
+# GZIP All files
+# ==============
+gzip ${SBJ}_Anat*BRIK
+echo -e "\033[0;32m++ INFO: ================================================= \033[0m"
+echo -e "\033[0;32m++ INFO: =========  SCRIPTS FINISHED SUCCESSFULLY   ====== \033[0m"
+echo -e "\033[0;32m++ INFO: ================================================= \033[0m"
+
diff --git a/rsfMRI/S02_Preprocess_SingleEcho_RR.sh b/rsfMRI/S02_Preprocess_SingleEcho_RR.sh
new file mode 100755
index 0000000..1666d52
--- /dev/null
+++ b/rsfMRI/S02_Preprocess_SingleEcho_RR.sh
@@ -0,0 +1,318 @@
+source ./00_CommonVariables.sh
+set -e
+
+# INPUT VARIABLES REQUIRED
+# ========================
+if [[ -z "${SBJ}" ]]; then
+   echo "You need to provide SBJ as an environment variable"
+   exit 1
+fi
+if [[ -z "${RUN}" ]]; then
+   echo "You need to provide RUN as an environment variable"
+   exit 1
+fi
+# NEED TO CREATE FULL BRAIN MASK
+# ==============================
+MASK_FB_ORIG=${SBJ}_${RUN}.REF.mask.FBrain.nii.gz
+# This program will generate a copy of the mask in MNI space
+MASK_FB_MNI=${SBJ}_${RUN}.REF.mask.FBrain.MNI.nii.gz
+MNI_MASTER_TEMPLATE=${PRJDIR}/Scripts/MNI152_T1_2009c_uni.LR2iso+tlrc
+# CREATING AND ENTERING IN TARGET DIR
+# ===================================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Creating Target Dir <----------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+cd ../PrcsData/${SBJ}/
+if [ ! -d D02_Preprocessed ]; then
+   echo -e "\033[0;31m+++ INFO: Creating directory [D02_Preprocessed]\033[0m"
+   mkdir D02_Preprocessed
+fi
+
+echo -e "\033[0;31m+++ INFO: Entering directory [D02_Preprocessed]\033[0m"
+cd D02_Preprocessed
+echo -e "\033[0;31m+++ INFO: Creating links to data files...\033[0m"
+
+for e in 01 02 03
+do
+  if [ ! -f ${SBJ}_${RUN}_E${e}.nii.gz ]; then ln -fvs ../D00_OriginalData/${SBJ}_${RUN}_E${e}.nii.gz .; fi
+done
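+
+# Sanity check (an addition, not in the original script): confirm all three
+# echoes carry the same number of volumes before any processing. 3dinfo -nt
+# is standard AFNI; comparing against the first echo is an assumption.
+nt_e01=`3dinfo -nt ${SBJ}_${RUN}_E01.nii.gz`
+for e in 02 03
+do
+  if [ "`3dinfo -nt ${SBJ}_${RUN}_E${e}.nii.gz`" -ne "${nt_e01}" ]; then
+     echo -e "\033[0;31m+++ WARNING: Echo E${e} has a different number of volumes than E01\033[0m"
+  fi
+done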
+if [ ! -f ${SBJ}_${RUN}_Echoes.1D ];       then ln -fvs ../D00_OriginalData/${SBJ}_${RUN}_Echoes.1D .; fi
+if [ ! -f ${SBJ}_epi_forward-e01.nii.gz ]; then ln -fvs ../D00_OriginalData/${SBJ}_epi_forward-e01.nii.gz .; fi
+if [ ! -f ${SBJ}_epi_reverse-e01.nii.gz ]; then ln -fvs ../D00_OriginalData/${SBJ}_epi_reverse-e01.nii.gz .; fi
+# DISCARD FIRST 2 VOLUMES
+# =======================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Remove initial 2 volumes <-----------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+for e in 01 02 03
+do
+  echo -e "\033[0;31m+++ INFO: Working on echo E${e} ...\033[0m"
+  3dcalc -overwrite -a ${SBJ}_${RUN}_E${e}.nii.gz'[2..$]' -expr a -prefix pc00.${SBJ}_${RUN}_E${e}.discard.nii.gz
+done
+
+# COUNT OUTLIERS
+# ==============
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ----------------------------> Count Outliers <-----------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+for e in 01 02 03
+do
+  3dToutcount -overwrite -automask -fraction -polort 3 -legendre pc00.${SBJ}_${RUN}_E${e}.discard.nii.gz > pc00.${SBJ}_${RUN}_E${e}.outcount.1D
+  1deval -a pc00.${SBJ}_${RUN}_E${e}.outcount.1D -expr "1-step(a-0.1)" > pc00.${SBJ}_${RUN}_E${e}.out.cen.1D
+done
+
+# SLICE TIMING CORRECTION
+# =======================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Slice Time Correction <--------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+for e in 01 02 03
+do
+  echo -e "\033[0;31m+++ INFO: Working on echo E${e} ...\033[0m"
+  3dTshift -overwrite -Fourier -tzero 0 \
+           -prefix pc01.${SBJ}_${RUN}_E${e}.tshift.nii.gz \
+           pc00.${SBJ}_${RUN}_E${e}.discard.nii.gz
+done
+
+# BLIP UP/BLIP DOWN CORRECTION
+# ============================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Blip Up / Blip Down <----------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+
+# (1) Create Median Datasets
+3dTstat -overwrite -median -prefix rm.${SBJ}_${RUN}_E01.blip.med.fwd ${SBJ}_epi_forward-e01.nii.gz
+3dTstat -overwrite -median -prefix rm.${SBJ}_${RUN}_E01.blip.med.rev ${SBJ}_epi_reverse-e01.nii.gz
+
+# (2) Automask Median Datasets
+3dAutomask -overwrite -apply_prefix rm.${SBJ}_${RUN}_E01.blip.med.masked.fwd rm.${SBJ}_${RUN}_E01.blip.med.fwd+orig
+3dAutomask -overwrite -apply_prefix rm.${SBJ}_${RUN}_E01.blip.med.masked.rev rm.${SBJ}_${RUN}_E01.blip.med.rev+orig
+
+# (3) Compute the midpoint warp between the median datasets
+3dQwarp -overwrite -plusminus -pmNAMES Rev For \
+        -pblur 0.05 0.05 -blur -1 -1           \
+        -noweight -minpatch 9                  \
+        -source rm.${SBJ}_${RUN}_E01.blip.med.masked.rev+orig \
+        -base   rm.${SBJ}_${RUN}_E01.blip.med.masked.fwd+orig \
+        -prefix ${SBJ}_${RUN}_E01.blip_warp
+
+# (4) Warp EPI Timeseries
+for e in 01 02 03
+do
+  3dNwarpApply -overwrite -quintic -nwarp ${SBJ}_${RUN}_E01.blip_warp_For_WARP+orig \
+               -source pc01.${SBJ}_${RUN}_E${e}.tshift.nii.gz \
+               -prefix pc02.${SBJ}_${RUN}_E${e}.blip.nii.gz
+  3drefit -atrcopy ${SBJ}_epi_reverse-e01.nii.gz IJK_TO_DICOM_REAL \
+          pc02.${SBJ}_${RUN}_E${e}.blip.nii.gz
+done
+3dAutomask -overwrite -eclip -clfrac 0.5 -prefix ${MASK_FB_ORIG} pc02.${SBJ}_${RUN}_E01.blip.nii.gz
+
+rm rm.${SBJ}_${RUN}_E01.blip.med.fwd+orig.*
+rm rm.${SBJ}_${RUN}_E01.blip.med.masked.fwd+orig.*
+rm rm.${SBJ}_${RUN}_E01.blip.med.masked.rev+orig.*
+rm rm.${SBJ}_${RUN}_E01.blip.med.rev+orig.*
+
+# COMPUTE STATIC S0 AND T2S MAPS
+# ==============================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Compute Static Maps <----------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dZcat -overwrite -prefix pc02.${SBJ}_${RUN}_ZCAT.blip.nii.gz pc02.${SBJ}_${RUN}_E??.blip.nii.gz
+
+module load python/3.6
+# module load Anaconda
+# Environment originally created with:
+#   conda create --prefix /data/SFIMJGC/PRJ_MEPFM/Apps/envs/mepfm_p36 python=3.6 numpy scipy scikit-learn seaborn nibabel pandas matplotlib sphinx
+# source activate /data/RS_preprocess/Apps/envs/inati_meica_p27
+python /data/Epilepsy_EEG/Apps/SFIM_ME/rt_tools/me_get_staticT2star.py -d pc02.${SBJ}_${RUN}_ZCAT.blip.nii.gz \
+       --tes_file ${SBJ}_${RUN}_Echoes.1D \
+       --non_linear \
+       --prefix pc02.${SBJ}_${RUN}_STATIC \
+       --mask ${MASK_FB_ORIG}
+# source deactivate
+rm pc02.${SBJ}_${RUN}_ZCAT.blip.nii.gz
+
+# CREATE CSF MASK
+# ===============
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Creating CSF Mask <------------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dmask_tool -overwrite -inputs ${MASK_FB_ORIG} -dilate_inputs -3 -prefix rm.${SBJ}_${RUN}.eroded.nii.gz
+3dcalc -overwrite -a pc02.${SBJ}_${RUN}_STATIC.sTE.t2s.nii \
+       -m rm.${SBJ}_${RUN}.eroded.nii.gz \
+       -expr 'ispositive(a-100)*m' \
+       -prefix rm.${SBJ}_${RUN}.REF.mask.CSF.nii.gz
+3dclust -1Dformat -overwrite -nosum -1dindex 0 -1tindex 0 -2thresh -0.5 0.5 -dxyz=1 \
+        -savemask ${SBJ}_${RUN}.REF.mask.CSF.nii.gz 1.01 20 \
+        rm.${SBJ}_${RUN}.REF.mask.CSF.nii.gz
+3dcalc -overwrite -a ${SBJ}_${RUN}.REF.mask.CSF.nii.gz -expr 'ispositive(a)' -prefix ${SBJ}_${RUN}.REF.mask.CSF.nii.gz
+rm rm.${SBJ}_${RUN}.eroded.nii.gz
+rm rm.${SBJ}_${RUN}.REF.mask.CSF.nii.gz
+
+# HEAD MOTION CORRECTION
+# ======================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Head motion / Alignment <------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;31m+++ INFO: Estimating Motion Parameters using the first echo...\033[0m"
+REF4VOLREG=${SBJ}_${RUN}.REF.nii.gz
+3dbucket -overwrite -prefix ${REF4VOLREG} pc02.${SBJ}_${RUN}_E01.blip.nii.gz'[0]'
+
+3dvolreg -overwrite -verbose -zpad 1 \
+         -1Dmatrix_save ${SBJ}_${RUN}_matrix_intrarun \
+         -maxdisp1D ${SBJ}_${RUN}_MaxMot.1D \
+         -1Dfile ${SBJ}_${RUN}_Motion.1D \
+         -base ${REF4VOLREG} \
+         -prefix rm.${SBJ}_${RUN}_E01.volreg.nii.gz \
+         pc02.${SBJ}_${RUN}_E01.blip.nii.gz
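+
+# Convenience step (an addition, not in the original script): collapse the six
+# motion estimates into a single Euclidean-norm time series, which is easier
+# to eyeball than six columns. 1d_tool.py and -collapse_cols are standard
+# AFNI; the output file name is an assumption.
+1d_tool.py -overwrite -infile ${SBJ}_${RUN}_Motion.1D \
+           -collapse_cols euclidean_norm \
+           -write ${SBJ}_${RUN}_Motion.enorm.1D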
+
+rm rm.${SBJ}_${RUN}_E01.volreg.nii.gz
+
+1d_tool.py -overwrite -infile ${SBJ}_${RUN}_Motion.1D -set_nruns 1 -demean -write ${SBJ}_${RUN}_Motion.demean.1D
+1d_tool.py -overwrite -infile ${SBJ}_${RUN}_Motion.1D -set_nruns 1 -derivative -demean -write ${SBJ}_${RUN}_Motion.demean.der.1D
+1d_tool.py -overwrite -infile ${SBJ}_${RUN}_MaxMot.1D -derivative -write ${SBJ}_${RUN}_MaxMot.rel.1D
+
+# ALIGN ANATOMICAL TO REFERENCE EPI
+# =================================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ --------------------> Copying Anat 2 MNI Transformations <-----------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+if [ ! -f ${SBJ}_Anat_bc_ns+orig.HEAD ];    then ln -s ../D01_Anatomical/${SBJ}_Anat_bc_ns+orig.* .;    fi
+if [ ! -f ${SBJ}_Anat_bc_ns+tlrc.HEAD ];    then ln -s ../D01_Anatomical/${SBJ}_Anat_bc_ns+tlrc.* .;    fi
+if [ ! -f ${SBJ}_Anat_bc_ns.AB+tlrc.HEAD ]; then ln -s ../D01_Anatomical/${SBJ}_Anat_bc_ns.AB+tlrc.* .; fi
+if [ "${doNL}" -eq "0" ]; then
+   if [ ! -L ${SBJ}_MNI2Anat.Xaff12.1D ]; then ln -s ../D01_Anatomical/${SBJ}_MNI2Anat.Xaff12.1D .; fi
+   if [ ! -L ${SBJ}_Anat2MNI.Xaff12.1D ]; then ln -s ../D01_Anatomical/${SBJ}_Anat2MNI.Xaff12.1D .; fi
+else
+   if [ ! -L ${SBJ}_Anat_bc_ns_WARP+tlrc.HEAD ]; then ln -s ../D01_Anatomical/${SBJ}_Anat_bc_ns_WARP+tlrc.* .; fi
+fi
+
+echo -e "\033[0;31m++ INFO: Bias Correct EPI reference...\033[0m"
+REF4ANAT=${SBJ}_${RUN}.REF.bc.nii.gz
+REF4ANAT_NS=${SBJ}_${RUN}.REF.bc.ns.nii.gz
+3dresample -overwrite -inset ../D01_Anatomical/${SBJ}_Anat.UN.Bias+orig -master ${REF4VOLREG} -prefix ${SBJ}_${RUN}.REF.Bias.nii.gz
+3drefit -atrcopy ${SBJ}_epi_reverse-e01.nii.gz IJK_TO_DICOM_REAL ${SBJ}_${RUN}.REF.Bias.nii.gz
+
+3dcalc -overwrite \
+       -a ${SBJ}_${RUN}.REF.Bias.nii.gz \
+       -b ${REF4VOLREG} \
+       -expr 'b*a' \
+       -prefix ${REF4ANAT}
+
+echo -e "\033[0;31m++ INFO: Skull strip EPI reference...\033[0m"
+3dcalc -overwrite \
+       -a ${REF4ANAT} \
+       -m ${MASK_FB_ORIG} \
+       -expr 'a*m' \
+       -prefix ${REF4ANAT_NS}
+
+echo -e "\033[0;31m++ INFO: Zero pad anat (just in case a large displacement is needed)... \033[0m"
+3dZeropad -overwrite -A 10 -P 10 -I 10 -S 10 -R 10 -L 10 -prefix ${SBJ}_Anat_bc_ns.pad ${SBJ}_Anat_bc_ns+orig
+
+echo -e "\033[0;31m++ INFO: Align ANAT to REF EPI... \033[0m"
+align_epi_anat.py -anat ${SBJ}_Anat_bc_ns.pad+orig \
+                  -epi ${REF4ANAT_NS} \
+                  -epi_base 0 -anat2epi -anat_has_skull no \
+                  -epi_strip None \
+                  -deoblique on -giant_move \
+                  -master_anat SOURCE -overwrite
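+
+# Optional QC sketch (an addition): a crude spatial-correlation score between
+# the aligned anatomical and the EPI reference. 3ddot needs both datasets on
+# the same grid, hence the temporary resample; treating a low value as an
+# alignment-failure flag is an assumption -- visual inspection remains the
+# real check.
+3dresample -overwrite -inset ${SBJ}_Anat_bc_ns.pad_al+orig -master ${REF4ANAT_NS} -prefix rm.${SBJ}_${RUN}.anat2epi.nii.gz
+echo -e "\033[0;31m++ INFO: Anat/EPI correlation after alignment:\033[0m"
+3ddot -demean rm.${SBJ}_${RUN}.anat2epi.nii.gz ${REF4ANAT_NS}
+rm rm.${SBJ}_${RUN}.anat2epi.nii.gz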
\033[0m" +3dAutobox -overwrite -prefix ${SBJ}_Anat_bc_ns_al${RUN} -npad 3 ${SBJ}_Anat_bc_ns.pad_al+orig +3dcopy -overwrite ${SBJ}_Anat_bc_ns_al${RUN}+orig ${SBJ}_Anat_bc_ns_al${RUN}.nii.gz +#rm ${SBJ}_Anat_bc_ns_al+orig.* +rm ${SBJ}_Anat_bc_ns.pad+orig.* +rm ${SBJ}_Anat_bc_ns.pad_al+orig.* + +mv ${SBJ}_Anat_bc_ns.pad_al_mat.aff12.1D ${SBJ}_${RUN}.Anat2REF.Xaff12.1D +if [ -f ${SBJ}_Anat_bc_ns.pad_al_e2a_only_mat.aff12.1D ]; then rm ${SBJ}_Anat_bc_ns.pad_al_e2a_only_mat.aff12.1D; fi +#exit + +# CREATE FINAL TRANSFORMATION MATRIX +# ================================== +echo "\033[0;31m++ INFO: Create all transformation matrices \033[0m" +cat_matvec -ONELINE ${SBJ}_${RUN}.Anat2REF.Xaff12.1D -I > ${SBJ}_${RUN}.REF2Anat.Xaff12.1D + +echo "\033[0;31m++ INFO: Apply Non Linear Transform and Mot Correction \033[0m" +for e in 01 02 03 +do +echo "\033[0;31m++ INFO: Working on Echo E${e} ...\033[0m" +3dNwarpApply -overwrite \ + -source pc01.${SBJ}_${RUN}_E${e}.tshift.nii.gz \ + -nwarp ''''../D01_Anatomical/${SBJ}'''_Anat_bc_ns_WARP+tlrc '''${SBJ}'''_'''${RUN}'''.REF2Anat.Xaff12.1D '''${SBJ}'''_'''${RUN}'''_matrix_intrarun.aff12.1D '''${SBJ}'''_'''${RUN}'''_E01.blip_warp_For_WARP+orig'\ + -master ${MNI_MASTER_TEMPLATE} \ + -prefix pc03.${SBJ}_${RUN}_E${e}.volreg.nii.gz +done + +echo "\033[0;31m++ INFO: Working on Static S0 map...\033[0m" +3dNwarpApply -overwrite \ + -source pc02.${SBJ}_${RUN}_STATIC.sTE.S0.nii \ + -nwarp ''''../D01_Anatomical/${SBJ}'''_Anat_bc_ns_WARP+tlrc '''${SBJ}'''_'''${RUN}'''.REF2Anat.Xaff12.1D '''${SBJ}'''_'''${RUN}'''_matrix_intrarun.aff12.1D '''${SBJ}'''_'''${RUN}'''_E01.blip_warp_For_WARP+orig' \ + -master ${MNI_MASTER_TEMPLATE} \ + -prefix ${SBJ}_${RUN}_STATIC.sTE.S0.MNI.nii + +echo "\033[0;31m++ INFO: Working on Full Brain Mask... \033[0m" +3dNwarpApply -overwrite -ainterp NN \ + -source ${MASK_FB_ORIG} \ + -nwarp ''''../D01_Anatomical/${SBJ}'''_Anat_bc_ns_WARP+tlrc '''${SBJ}'''_'''${RUN}'''.REF2Anat.Xaff12.1D '''${SBJ}'''_'''${RUN}'''_matrix_intrarun.aff12.1D '''${SBJ}'''_'''${RUN}'''_E01.blip_warp_For_WARP+orig' \ + -master ${MNI_MASTER_TEMPLATE} \ + -prefix ${MASK_FB_MNI} + +# REGRESS SIGNALS OF NO INTEREST +# ============================== +echo -e "\033[0;36m+++ =======================================================================================\033[0m" +echo -e "\033[0;36m+++ ------------------------> Create Physio Regressor <------------------------------------\033[0m" +echo -e "\033[0;36m+++ =======================================================================================\033[0m" +for e in 01 02 03 +do + echo "\033[0;31m++ INFO: Working on Echo E${e} ...\033[0m" + 3dpc -overwrite -dmean -pcsave 5 -mask ${SBJ}_${RUN}.REF.mask.CSF.nii.gz -prefix ${SBJ}_${RUN}_E${e}.CSF.PCA pc01.${SBJ}_${RUN}_E${e}.tshift.nii.gz + rm ${SBJ}_${RUN}_E${e}.CSF.PCA??.1D + rm ${SBJ}_${RUN}_E${e}.CSF.PCA_eig.1D + rm ${SBJ}_${RUN}_E${e}.CSF.PCA+orig.* +done + +echo -e "\033[0;36m+++ =======================================================================================\033[0m" +echo -e "\033[0;36m+++ ------------------------> Remove Nuisance Signals <------------------------------------\033[0m" +echo -e "\033[0;36m+++ =======================================================================================\033[0m" +for e in 01 02 03 +do + echo "\033[0;31m++ INFO: Working on Echo E${e} ...\033[0m" + 3dTproject -overwrite \ + -mask ${MASK_FB_MNI} \ + -input pc03.${SBJ}_${RUN}_E${e}.volreg.nii.gz \ + -prefix pc04.${SBJ}_${RUN}_E${e}.project.nii.gz \ + -blur 6 \ + -polort 5 \ + -ort 
+
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Remove Nuisance Signals <------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+for e in 01 02 03
+do
+  echo -e "\033[0;31m++ INFO: Working on Echo E${e} ...\033[0m"
+  3dTproject -overwrite \
+       -mask ${MASK_FB_MNI} \
+       -input pc03.${SBJ}_${RUN}_E${e}.volreg.nii.gz \
+       -prefix pc04.${SBJ}_${RUN}_E${e}.project.nii.gz \
+       -blur 6 \
+       -polort 5 \
+       -ort ${SBJ}_${RUN}_Motion.demean.1D \
+       -ort ${SBJ}_${RUN}_Motion.demean.der.1D \
+       -ort ${SBJ}_${RUN}_E${e}.CSF.PCA_vec.1D
+done
+
+# CONVERT TO SPC
+# ==============
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Convert to SPC <---------------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+for e in 01 02 03
+do
+  echo -e "\033[0;31m++ INFO: Working on Echo E${e} ...\033[0m"
+  # (1) Compute a similarly blurred version of the data without the regression (to obtain the mean signal per voxel)
+  3dBlurInMask -overwrite -FWHM 6 \
+       -mask ${MASK_FB_MNI} \
+       -input pc03.${SBJ}_${RUN}_E${e}.volreg.nii.gz \
+       -prefix pc04.${SBJ}_${RUN}_E${e}.blur.nii.gz
+  # (2) Compute the mean signal per voxel using this blurred version
+  3dTstat -overwrite -mean \
+       -prefix pc04.${SBJ}_${RUN}_E${e}.MEAN.nii.gz \
+       pc04.${SBJ}_${RUN}_E${e}.blur.nii.gz
+  # (3) As the projected version already has no mean, all that's left is the division
+  3dcalc -overwrite -a pc04.${SBJ}_${RUN}_E${e}.project.nii.gz \
+       -m pc04.${SBJ}_${RUN}_E${e}.MEAN.nii.gz \
+       -expr 'a/m' \
+       -prefix pc05.${SBJ}_${RUN}_E${e}.spc.nii.gz
+  # (4) Remove the blurred version used to compute the MEAN (to avoid confusion)
+  rm pc04.${SBJ}_${RUN}_E${e}.blur.nii.gz
+done
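+
+# Sanity check sketch (an addition): after conversion to signal percent change
+# the data should be roughly zero-mean inside the mask; print the global mean
+# of one SPC dataset as a cheap check. 3dTstat and 3dBrickStat are standard
+# AFNI; the expectation of a near-zero value is an assumption that holds
+# because 3dTproject removed the polynomial trends.
+3dTstat -overwrite -mean -prefix rm.${SBJ}_${RUN}_E01.spc.mean.nii.gz pc05.${SBJ}_${RUN}_E01.spc.nii.gz
+3dBrickStat -mean -mask ${MASK_FB_MNI} rm.${SBJ}_${RUN}_E01.spc.mean.nii.gz
+rm rm.${SBJ}_${RUN}_E01.spc.mean.nii.gz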
diff --git a/rsfMRI/S03_Preprocess_OC.sh b/rsfMRI/S03_Preprocess_OC.sh
new file mode 100755
index 0000000..be86e03
--- /dev/null
+++ b/rsfMRI/S03_Preprocess_OC.sh
@@ -0,0 +1,186 @@
+# ============================================================================
+# Author: Javier Gonzalez-Castillo
+# Date: November/12/2017
+#
+# Purpose:
+#    Run the OC pipeline
+# Usage:
+#    export SBJ=SBJ01 RUN=Event01; sh ./S03_Preprocess_OC.sh
+#
+# ============================================================================
+Numjobs=6
+# Check for input parameters
+# ==========================
+if [[ -z "${SBJ}" ]]; then
+   echo "You need to provide SBJ as an environment variable"
+   exit 1
+fi
+if [[ -z "${RUN}" ]]; then
+   echo "You need to provide RUN as an environment variable"
+   exit 1
+fi
+set -e
+
+# Common Stuff
+# ============
+source ./00_CommonVariables.sh
+MASK_FB_MNI=${SBJ}_${RUN}.REF.mask.FBrain.MNI.nii.gz
+# Enter the D02_Preprocessed directory
+# ====================================
+cd ${PRJDIR}/PrcsData/${SBJ}
+
+if [ ! -d D02_Preprocessed ]; then
+   echo "Pre-processing directory does not exist."
+   exit 1
+fi
+
+cd D02_Preprocessed
+
+# Load all necessary info: Number of echoes, echo files, etc...
+# ==============================================================
+Ne=`ls ../D00_OriginalData/${SBJ}_${RUN}_E??.nii.gz | wc -l | awk '{print $1}'`
+echo -e "\033[0;32m++ INFO: Number of Echoes=${Ne}\033[0m"
+DataFiles=()
+for i in `seq 1 $Ne`
+do
+  EchoID=`printf %02d $i`
+  File=${SBJ}_${RUN}_E${EchoID}.nii.gz
+  DataFiles[$i]=${File}
+  Prefix=${SBJ}_${RUN}_E${EchoID}
+  DataPrefixes[$i]=${Prefix}
+done
+echo -e "\033[0;32m++ INFO: Working Directory:\033[0m" `pwd`
+
+# Axialize for proper memory allocation in Python
+# ===============================================
+echo -e "\n"
+echo -e "\033[0;32m++ STEP (1) Axialize files prior to Z-cat\033[0m"
+echo -e "\033[0;32m=========================================\033[0m"
+for i in `seq 1 $Ne`
+do
+  3daxialize -overwrite -prefix pc03.${DataPrefixes[$i]}.volreg.nii.gz pc03.${DataPrefixes[$i]}.volreg.nii.gz
+done
+3daxialize -overwrite -prefix ${MASK_FB_MNI} ${MASK_FB_MNI}
+
+# Concatenate EPI datasets in Z-direction
+# =======================================
+echo -e "\n"
+echo -e "\033[0;32m++ STEP (2) Z-concatenating echo files\033[0m"
+echo -e "\033[0;32m======================================\033[0m"
+FilesToZcat=`ls pc03.${SBJ}_${RUN}_E??.volreg.nii.gz | tr -s '\n' ' '`
+echo -e "\033[0;32m++ Files concatenated in Z-direction: ${FilesToZcat}\033[0m"
+3dZcat -overwrite -prefix pc06.${SBJ}_${RUN}.zcat.data.nii.gz ${FilesToZcat}
+
+# Concatenate EPI intra-cranial mask in Z-direction
+# =================================================
+echo -e "\n"
+echo -e "\033[0;32m++ STEP (3) Z-concatenating mask files\033[0m"
+echo -e "\033[0;32m======================================\033[0m"
+MasksToZcat=''; for i in `seq 1 $Ne`; do MasksToZcat="${MasksToZcat} ${MASK_FB_MNI}"; done
+echo -e "\033[0;32m++ Files concatenated in Z-direction: ${MasksToZcat}\033[0m"
+3dZcat -overwrite -prefix pc06.${SBJ}_${RUN}.zcat.mask.nii.gz ${MasksToZcat}
+
+# Mask the EPI Z-concatenated file (input to ME-ICA)
+# ==================================================
+echo -e "\n"
+echo -e "\033[0;32m++ STEP (4) Masking the Z-cat file\033[0m"
+echo -e "\033[0;32m==================================\033[0m"
+3dcalc -float -overwrite -m pc06.${SBJ}_${RUN}.zcat.mask.nii.gz \
+       -d pc06.${SBJ}_${RUN}.zcat.data.nii.gz \
+       -expr 'm*d' -prefix pc06.${SBJ}_${RUN}.zcat.data.nii.gz
+
+
+# Compute the t2s maps
+# ====================
+echo -e "\n"
+echo -e "\033[0;32m++ STEP (5) Compute Static T2s and S0 Maps\033[0m"
+echo -e "\033[0;32m==========================================\033[0m"
+module load python/3.6
+# module load Anaconda
+# source activate /data/RS_preprocess/Apps/envs/inati_meica_p27
+python /data/Epilepsy_EEG/Apps/SFIM_ME/rt_tools/me_get_staticT2star.py \
+       -d pc06.${SBJ}_${RUN}.zcat.data.nii.gz \
+       --mask ${MASK_FB_MNI} \
+       --tes_file ${SBJ}_${RUN}_Echoes.1D \
+       --out_dir ./ \
+       --prefix pc07.${SBJ}_${RUN} \
+       --ncpus ${Numjobs}
+
+3drefit -space MNI pc07.${SBJ}_${RUN}.sTE.t2s.nii
+3drefit -space MNI pc07.${SBJ}_${RUN}.sTE.S0.nii
+3drefit -space MNI pc07.${SBJ}_${RUN}.sTE.mask.nii
+3drefit -space MNI pc07.${SBJ}_${RUN}.SME.nii
+
+# Compute Optimally Combined
+# ==========================
+echo -e "\n"
+echo -e "\033[0;32m++ STEP (6) Compute Optimally Combined TS\033[0m"
+echo -e "\033[0;32m=========================================\033[0m"
+python /data/Epilepsy_EEG/Apps/SFIM_ME/rt_tools/me_get_OCtimeseries.py \
+       -d pc06.${SBJ}_${RUN}.zcat.data.nii.gz \
+       --mask ${MASK_FB_MNI} \
+       --t2s pc07.${SBJ}_${RUN}.sTE.t2s.nii \
+       --tes_file ${SBJ}_${RUN}_Echoes.1D \
+       --out_dir ./ \
+       --prefix pc07.${SBJ}_${RUN}
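+
+# Note (an addition for readers): the optimal combination computed above
+# weights each echo by w_e ~ TE_e * exp(-TE_e / T2*), the same weighting
+# implemented by optcom() in the tedana scripts later in this diff. The exact
+# implementation lives in me_get_OCtimeseries.py; this comment is only a
+# summary of the standard formula, not a description of that file.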
+
+# source deactivate /data/RS_preprocess/Apps/envs/inati_meica_p27
+# Correct space to MNI
+# ====================
+if [ -f pc07.${SBJ}_${RUN}.OCTS.nii ]; then gzip -f pc07.${SBJ}_${RUN}.OCTS.nii; fi
+3drefit -space MNI pc07.${SBJ}_${RUN}.OCTS.nii.gz
+
+# Create Ventricular Regressor for OC
+# ===================================
+MASK_CSF_ORIG=${SBJ}_${RUN}.REF.mask.CSF.nii.gz
+MASK_CSF_MNI=${SBJ}_${RUN}.REF.mask.CSF.MNI.nii.gz
+MNI_MASTER_TEMPLATE=${PRJDIR}/Scripts/MNI152_T1_2009c_uni.LR2iso+tlrc
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Create Physio Regressor <------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dNwarpApply -overwrite -ainterp NN \
+     -source ${MASK_CSF_ORIG} \
+     -nwarp "../D01_Anatomical/${SBJ}_Anat_bc_ns_WARP+tlrc ${SBJ}_${RUN}.REF2Anat.Xaff12.1D ${SBJ}_${RUN}_matrix_intrarun.aff12.1D ${SBJ}_${RUN}_E01.blip_warp_For_WARP+orig" \
+     -master ${MNI_MASTER_TEMPLATE} \
+     -prefix ${MASK_CSF_MNI}
+3dpc -overwrite -dmean -pcsave 5 -mask ${MASK_CSF_MNI} -prefix ${SBJ}_${RUN}_OCTS.CSF.PCA pc07.${SBJ}_${RUN}.OCTS.nii.gz
+rm ${SBJ}_${RUN}_OCTS.CSF.PCA??.1D
+rm ${SBJ}_${RUN}_OCTS.CSF.PCA_eig.1D
+rm ${SBJ}_${RUN}_OCTS.CSF.PCA+tlrc.*
+
+# Remove regressors of no interest
+# ================================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Remove Nuisance Signals <------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dTproject -overwrite \
+     -mask ${MASK_FB_MNI} \
+     -input pc07.${SBJ}_${RUN}.OCTS.nii.gz \
+     -prefix pc08.${SBJ}_${RUN}_OCTS.project.nii.gz \
+     -blur 6 \
+     -polort 5 \
+     -ort ${SBJ}_${RUN}_Motion.demean.1D \
+     -ort ${SBJ}_${RUN}_Motion.demean.der.1D \
+     -ort ${SBJ}_${RUN}_OCTS.CSF.PCA_vec.1D
+
+# Compute spc for OC
+# ==================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Convert to SPC <---------------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dBlurInMask -overwrite -FWHM 6 \
+     -mask ${MASK_FB_MNI} \
+     -input pc07.${SBJ}_${RUN}.OCTS.nii.gz \
+     -prefix pc08.${SBJ}_${RUN}_OCTS.blur.nii.gz
+3dTstat -overwrite -mean \
+     -prefix pc08.${SBJ}_${RUN}_OCTS.MEAN.nii.gz \
+     pc08.${SBJ}_${RUN}_OCTS.blur.nii.gz
+3dcalc -overwrite -a pc08.${SBJ}_${RUN}_OCTS.project.nii.gz \
+     -m pc08.${SBJ}_${RUN}_OCTS.MEAN.nii.gz \
+     -expr 'a/m' \
+     -prefix pc09.${SBJ}_${RUN}_OCTS.spc.nii.gz
+
+echo -e "\n"
+echo -e "\033[0;32m#====================================#\033[0m"
+echo -e "\033[0;32m#  SUCCESSFUL TERMINATION OF SCRIPT  #\033[0m"
+echo -e "\033[0;32m#====================================#\033[0m"
+
diff --git a/rsfMRI/S04_Preprocess_MEICA.sh b/rsfMRI/S04_Preprocess_MEICA.sh
new file mode 100755
index 0000000..902bd38
--- /dev/null
+++ b/rsfMRI/S04_Preprocess_MEICA.sh
@@ -0,0 +1,113 @@
+# ============================================================================
+# Author: Javier Gonzalez-Castillo
+# Date: November/12/2017
+#
+# Purpose:
+#    Run MEICA, remove physio, compute SPC time series
+# Usage:
+#    export SBJ=SBJ01 RUN=Event01; sh ./S04_Preprocess_MEICA.sh
+# ============================================================================
+Numjobs=6
+# Check for input parameters
+# ==========================
+if [[ -z "${SBJ}" ]]; then
+   echo "You need to provide SBJ as an environment variable"
+   exit 1
+fi
+if [[ -z "${RUN}" ]]; then
+   echo "You need to provide RUN as an environment variable"
+   exit 1
+fi
+set -e
+
+# Common Stuff
+# ============
+source ./00_CommonVariables.sh
+MASK_FB_MNI=${SBJ}_${RUN}.REF.mask.FBrain.MNI.nii.gz
+MASK_CSF_MNI=${SBJ}_${RUN}.REF.mask.CSF.MNI.nii.gz
+# Enter the D02_Preprocessed directory
+# ====================================
+cd ${PRJDIR}/PrcsData/${SBJ}
+
+if [ ! -d D02_Preprocessed ]; then
+   echo "Pre-processing directory does not exist."
+   exit 1
+fi
+
+cd D02_Preprocessed
+
+# RUN TE-DEPENDENCE DENOISING
+# ===========================
+module load python/2.7
+# module load Anaconda
+# source activate /data/RS_preprocess/Apps/envs/inati_meica_p27
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Run MEICA Denoising <----------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+ECHOTIMES=`cat ${SBJ}_${RUN}_Echoes.1D`
+echo ${ECHOTIMES}
+MEICADIR=MEICA_KDAW12_${RUN}
+#python /data/Epilepsy_EEG/Apps/me-ica/meica.libs/tedana.py \
+python /data/Epilepsy_EEG/Apps/MEICA3/me-ica/meica.libs/tedana.py \
+       -d pc06.${SBJ}_${RUN}.zcat.data.nii.gz \
+       -e ${ECHOTIMES} \
+       --label=${MEICADIR}
+       #--kdaw=12 --label=${MEICADIR}
+3dcopy -overwrite TED.${MEICADIR}/dn_ts_OC.nii pc07.${SBJ}_${RUN}.MEICA.nii.gz
+3drefit -space MNI pc07.${SBJ}_${RUN}.MEICA.nii.gz
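+
+# Review sketch (an addition, not in the original script): list the components
+# tedana accepted so runs with zero accepted BOLD components are caught early.
+# accepted.txt is written by tedana into the TED.* directory (see
+# tedana_v25.py below); failing softly with a warning is an assumption.
+echo -e "\033[0;31m++ INFO: Accepted components:\033[0m"
+cat TED.${MEICADIR}/accepted.txt
+if [ ! -s TED.${MEICADIR}/accepted.txt ]; then
+   echo -e "\033[0;31m++ WARNING: tedana accepted no components for ${SBJ} ${RUN}.\033[0m"
+fi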
+
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Create MEICA Report <----------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+python /data/Epilepsy_EEG/Apps/Meica_Report/meica_report.py \
+       -t ${PRJDIR}/PrcsData/${SBJ}/D02_Preprocessed/TED.${MEICADIR}/ \
+       -o ${PRJDIR}/PrcsData/${SBJ}/D02_Preprocessed/TED.${MEICADIR}/Report/ \
+       --motion ${PRJDIR}/PrcsData/${SBJ}/D02_Preprocessed/${SBJ}_${RUN}_Motion.1D \
+       --ncpus 1 --overwrite
+# source deactivate /data/RS_preprocess/Apps/envs/inati_meica_p27
+
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Create Physio Regressor <------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dpc -overwrite -dmean -pcsave 5 -mask ${MASK_CSF_MNI} -prefix ${SBJ}_${RUN}_MEICA.CSF.PCA pc07.${SBJ}_${RUN}.MEICA.nii.gz
+rm ${SBJ}_${RUN}_MEICA.CSF.PCA??.1D
+rm ${SBJ}_${RUN}_MEICA.CSF.PCA_eig.1D
+rm ${SBJ}_${RUN}_MEICA.CSF.PCA+tlrc.*
+
+# Remove regressors of no interest
+# ================================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Remove Nuisance Signals <------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dTproject -overwrite \
+     -mask ${MASK_FB_MNI} \
+     -input pc07.${SBJ}_${RUN}.MEICA.nii.gz \
+     -prefix pc08.${SBJ}_${RUN}_MEICA.project.nii.gz \
+     -blur 6 \
+     -polort 5 \
+     -ort ${SBJ}_${RUN}_Motion.demean.1D \
+     -ort ${SBJ}_${RUN}_Motion.demean.der.1D \
+     -ort ${SBJ}_${RUN}_MEICA.CSF.PCA_vec.1D
+
+# Compute spc for MEICA
+# =====================
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+echo -e "\033[0;36m+++ ------------------------> Convert to SPC <---------------------------------------------\033[0m"
+echo -e "\033[0;36m+++ =======================================================================================\033[0m"
+3dBlurInMask -overwrite -FWHM 6 \
+     -mask ${MASK_FB_MNI} \
+     -input pc07.${SBJ}_${RUN}.MEICA.nii.gz \
+     -prefix pc08.${SBJ}_${RUN}_MEICA.blur.nii.gz
+3dTstat -overwrite -mean \
+     -prefix pc08.${SBJ}_${RUN}_MEICA.MEAN.nii.gz \
+     pc08.${SBJ}_${RUN}_MEICA.blur.nii.gz
+3dcalc -overwrite -a pc08.${SBJ}_${RUN}_MEICA.project.nii.gz \
+     -m pc08.${SBJ}_${RUN}_MEICA.MEAN.nii.gz \
+     -expr 'a/m' \
+     -prefix pc09.${SBJ}_${RUN}_MEICA.spc.nii.gz
+
+echo -e "\n"
+echo -e "\033[0;32m#====================================#\033[0m"
+echo -e "\033[0;32m#  SUCCESSFUL TERMINATION OF SCRIPT  #\033[0m"
+echo -e "\033[0;32m#====================================#\033[0m"
+
diff --git a/rsfMRI/tedana_v25.py b/rsfMRI/tedana_v25.py
new file mode 100755
index 0000000..c9ab88f
--- /dev/null
+++ b/rsfMRI/tedana_v25.py
@@ -0,0 +1,691 @@
+#!/usr/bin/env python
+__version__="v2.5 beta11"
+welcome_block="""
+# Multi-Echo ICA, Version %s
+#
+# Kundu, P., Brenowitz, N.D., Voon, V., Worbe, Y., Vertes, P.E., Inati, S.J., Saad, Z.S.,
+# Bandettini, P.A. & Bullmore, E.T. Integrated strategy for improving functional
+# connectivity mapping using multiecho fMRI. PNAS (2013).
+#
+# Kundu, P., Inati, S.J., Evans, J.W., Luh, W.M. & Bandettini, P.A. Differentiating
+# BOLD and non-BOLD signals in fMRI time series using multi-echo EPI. NeuroImage (2011).
+# http://dx.doi.org/10.1016/j.neuroimage.2011.12.028
+#
+# PROCEDURE 2 : Computes ME-PCA and ME-ICA
+# -Computes T2* map
+# -Computes PCA of concatenated ME data, then computes TE-dependence of PCs
+# -Computes ICA of TE-dependence PCs
+# -Identifies TE-dependent ICs, outputs high-\kappa (BOLD) component
+#  and denoised time series
+# -or- Computes TE-dependence of each component of a general linear model
+#  specified by input (includes MELODIC FastICA mixing matrix)
+""" % (__version__)
+
+import os
+import sys  # needed for sys.exit() in __main__
+from optparse import OptionParser
+import numpy as np
+import nibabel as nib
+from sys import stdout,argv
+import scipy.stats as stats
+import time
+import datetime
+if __name__=='__main__':
+    selfuncfile='%s/select_model.py' % os.path.dirname(argv[0])
+    execfile(selfuncfile)
+import cPickle as pickle
+#import ipdb
+import bz2
+
+F_MAX=500
+Z_MAX = 8
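+
+# NOTE (an addition): this script is Python 2 only -- it relies on execfile()
+# and cPickle, both removed in Python 3 -- and expects MDP, NumPy, SciPy and
+# NiBabel to be importable. Run it with the python/2.7 environment loaded by
+# the calling shell scripts above.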
+ """ + return a + (b - a)*fraction; + +def scoreatpercentile(a, per, limit=(), interpolation_method='lower'): + """ + This function is grabbed from scipy + + """ + values = np.sort(a, axis=0) + if limit: + values = values[(limit[0] <= values) & (values <= limit[1])] + + idx = per /100. * (values.shape[0] - 1) + if (idx % 1 == 0): + score = values[int(idx)] + else: + if interpolation_method == 'fraction': + score = _interpolate(values[int(idx)], values[int(idx) + 1], + idx % 1) + elif interpolation_method == 'lower': + score = values[int(np.floor(idx))] + elif interpolation_method == 'higher': + score = values[int(np.ceil(idx))] + else: + raise ValueError("interpolation_method can only be 'fraction', " \ + "'lower' or 'higher'") + return score + +def MAD(data, axis=None): + return np.median(np.abs(data - np.median(data, axis)), axis) + +def dice(A,B): + denom = np.array(A!=0,dtype=np.int).sum(0)+(np.array(B!=0,dtype=np.int).sum(0)) + if denom!=0: + AB_un = andb([A!=0,B!=0])==2 + numer = np.array(AB_un,dtype=np.int).sum(0) + return 2.*numer/denom + else: + return 0. + +def spatclust(data,mask,csize,thr,header,aff,infile=None,dindex=0,tindex=0): + if infile==None: + data = data.copy() + data[data1 and dindex+tindex==0: addopts="-doall" + if data is not None and len(np.squeeze(data).shape)>1 and dindex+tindex==0: addopts="-doall" + else: addopts="-1dindex %s -1tindex %s" % (str(dindex),str(tindex)) + os.system('3dmerge -overwrite %s -dxyz=1 -1clust 1 %i -1thresh %.02f -prefix __clout.nii.gz %s' % (addopts,int(csize),float(thr),infile)) + clustered = fmask(nib.load('__clout.nii.gz').get_data(),mask)!=0 + return clustered + +def rankvec(vals): + asort = np.argsort(vals) + ranks = np.zeros(vals.shape[0]) + ranks[asort]=np.arange(vals.shape[0])+1 + return ranks + +def niwrite(data,affine, name , header=None): + stdout.write(" + Writing file: %s ...." % name) + + thishead = header + if thishead == None: + thishead = head.copy() + thishead.set_data_shape(list(data.shape)) + + outni = nib.Nifti1Image(data,affine,header=thishead) + outni.to_filename(name) + print ('done.') + +def cat2echos(data,Ne): + """ + cat2echos(data,Ne) + + Input: + data shape is (nx,ny,Ne*nz,nt) + """ + nx,ny = data.shape[0:2] + nz = data.shape[2]/Ne + if len(data.shape) >3: + nt = data.shape[3] + else: + nt = 1 + return np.reshape(data,(nx,ny,nz,Ne,nt),order='F') + +def uncat2echos(data,Ne): + """ + uncat2echos(data,Ne) + + Input: + data shape is (nx,ny,Ne,nz,nt) + """ + nx,ny = data.shape[0:2] + nz = data.shape[2]*Ne + if len(data.shape) >4: + nt = data.shape[4] + else: + nt = 1 + return np.reshape(data,(nx,ny,nz,nt),order='F') + +def makemask(cdat): + + nx,ny,nz,Ne,nt = cdat.shape + + mask = np.ones((nx,ny,nz),dtype=np.bool) + + for i in range(Ne): + tmpmask = (cdat[:,:,:,i,:] != 0).prod(axis=-1,dtype=np.bool) + mask = mask & tmpmask + + return mask + +def fmask(data,mask): + """ + fmask(data,mask) + + Input: + data shape is (nx,ny,nz,...) + mask shape is (nx,ny,nz) + + Output: + out shape is (Nm,...) 
+ """ + + s = data.shape + sm = mask.shape + + N = s[0]*s[1]*s[2] + news = [] + news.append(N) + + if len(s) >3: + news.extend(s[3:]) + + tmp1 = np.reshape(data,news) + fdata = tmp1.compress((mask > 0 ).ravel(),axis=0) + + return fdata.squeeze() + +def unmask (data,mask): + """ + unmask (data,mask) + + Input: + + data has shape (Nm,nt) + mask has shape (nx,ny,nz) + + """ + M = (mask != 0).ravel() + Nm = M.sum() + + nx,ny,nz = mask.shape + + if len(data.shape) > 1: + nt = data.shape[1] + else: + nt = 1 + + out = np.zeros((nx*ny*nz,nt),dtype=data.dtype) + out[M,:] = np.reshape(data,(Nm,nt)) + + return np.reshape(out,(nx,ny,nz,nt)) + +def t2smap(catd,mask,tes): + """ + t2smap(catd,mask,tes) + + Input: + + catd has shape (nx,ny,nz,Ne,nt) + mask has shape (nx,ny,nz) + tes is a 1d numpy array + """ + nx,ny,nz,Ne,nt = catd.shape + N = nx*ny*nz + + echodata = fmask(catd,mask) + Nm = echodata.shape[0] + + #Do Log Linear fit + B = np.reshape(np.abs(echodata), (Nm,Ne*nt)).transpose() + B = np.log(B) + x = np.array([np.ones(Ne),-tes]) + X = np.tile(x,(1,nt)) + X = np.sort(X)[:,::-1].transpose() + + beta,res,rank,sing = np.linalg.lstsq(X,B) + t2s = 1/beta[1,:].transpose() + s0 = np.exp(beta[0,:]).transpose() + + #Goodness of fit + alpha = (np.abs(B)**2).sum(axis=0) + t2s_fit = blah = (alpha - res)/(2*res) + + out = unmask(t2s,mask),unmask(s0,mask),unmask(t2s_fit,mask) + + return out + +def get_coeffs(data,mask,X,add_const=False): + """ + get_coeffs(data,X) + + Input: + + data has shape (nx,ny,nz,nt) + mask has shape (nx,ny,nz) + X has shape (nt,nc) + + Output: + + out has shape (nx,ny,nz,nc) + """ + mdata = fmask(data,mask).transpose() + + X=np.atleast_2d(X) + if X.shape[0]==1: X=X.T + Xones = np.atleast_2d(np.ones(np.min(mdata.shape))).T + if add_const: X = np.hstack([X,Xones]) + + tmpbetas = np.linalg.lstsq(X,mdata)[0].transpose() + if add_const: tmpbetas = tmpbetas[:,:-1] + out = unmask(tmpbetas,mask) + + return out + + +def andb(arrs): + result = np.zeros(arrs[0].shape) + for aa in arrs: result+=np.array(aa,dtype=np.int) + return result + +def optcom(data,t2s,tes,mask): + """ + out = optcom(data,t2s) + + + Input: + + data.shape = (nx,ny,nz,Ne,Nt) + t2s.shape = (nx,ny,nz) + tes.shape = (Ne,) + + Output: + + out.shape = (nx,ny,nz,Nt) + """ + nx,ny,nz,Ne,Nt = data.shape + + fdat = fmask(data,mask) + ft2s = fmask(t2s,mask) + + tes = tes[np.newaxis,:] + ft2s = ft2s[:,np.newaxis] + + alpha = tes * np.exp(-tes /ft2s) + alpha = np.tile(alpha[:,:,np.newaxis],(1,1,Nt)) + + fout = np.average(fdat,axis = 1,weights=alpha) + out = unmask(fout,mask) + print ('Out shape is ', out.shape) + return out + +def getelbow(ks): + nc = ks.shape[0] + coords = np.array([np.arange(nc),ks]) + p = coords - np.tile(np.reshape(coords[:,0],(2,1)),(1,nc)) + b = p[:,-1] + b_hat = np.reshape(b/np.sqrt((b**2).sum()),(2,1)) + proj_p_b = p - np.dot(b_hat.T,p)*np.tile(b_hat,(1,nc)) + d = np.sqrt((proj_p_b**2).sum(axis=0)) + k_min_ind = d.argmax() + k_min = ks[k_min_ind] + return k_min_ind + +def getfbounds(ne): + F05s=[None,None,18.5,10.1,7.7,6.6,6.0,5.6,5.3,5.1,5.0] + F025s=[None,None,38.5,17.4,12.2,10,8.8,8.1,7.6,7.2,6.9] + F01s=[None,None,98.5,34.1,21.2,16.2,13.8,12.2,11.3,10.7,10.] 
+
+def getfbounds(ne):
+    F05s=[None,None,18.5,10.1,7.7,6.6,6.0,5.6,5.3,5.1,5.0]
+    F025s=[None,None,38.5,17.4,12.2,10,8.8,8.1,7.6,7.2,6.9]
+    F01s=[None,None,98.5,34.1,21.2,16.2,13.8,12.2,11.3,10.7,10.]
+    return F05s[ne-1],F025s[ne-1],F01s[ne-1]
+
+def eimask(dd,ees=None):
+    if ees==None: ees=range(dd.shape[1])
+    imask = np.zeros([dd.shape[0],len(ees)])
+    for ee in ees:
+        print (ee)
+        lthr = 0.001*scoreatpercentile(dd[:,ee,:].flatten(),98)
+        hthr = 5*scoreatpercentile(dd[:,ee,:].flatten(),98)
+        print (lthr,hthr)
+        imask[dd[:,ee,:].mean(1) > lthr,ee]=1
+        imask[dd[:,ee,:].mean(1) > hthr,ee]=0
+    return imask
+
+def tedpca(ste=0):
+    nx,ny,nz,ne,nt = catd.shape
+    ste = np.array([int(ee) for ee in str(ste).split(',')])
+    if len(ste) == 1 and ste[0]==-1:
+        print ("-Computing PCA of optimally combined multi-echo data")
+        OCcatd = optcom(catd,t2s,tes,mask)
+        OCmask = makemask(OCcatd[:,:,:,np.newaxis,:])
+        d = fmask(OCcatd,OCmask)
+        eim = eimask(d[:,np.newaxis,:])
+        eim = eim[:,0]==1
+        d = d[eim,:]
+        #ipdb.set_trace()
+    elif len(ste) == 1 and ste[0]==0:
+        print ("-Computing PCA of spatially concatenated multi-echo data")
+        ste = np.arange(ne)
+        d = np.float64(fmask(catd,mask))
+        eim = eimask(d)==1
+        d = d[eim]
+    else:
+        print ("-Computing PCA of TE #%s" % ','.join([str(ee) for ee in ste]))
+        d = np.float64(np.concatenate([fmask(catd[:,:,:,ee,:],mask)[:,np.newaxis,:] for ee in ste-1],axis=1))
+        eim = eimask(d)==1
+        eim = np.squeeze(eim)
+        d = np.squeeze(d[eim])
+
+    dz = ((d.T-d.T.mean(0))/d.T.std(0)).T  #Variance normalize timeseries
+    dz = (dz-dz.mean())/dz.std()           #Variance normalize everything
+
+    pcastate_fn = 'pcastate.pklbz'
+
+    if not os.path.exists(pcastate_fn):
+        ##Do PC dimension selection
+        #Get eigenvalue cutoff
+        u,s,v = np.linalg.svd(dz,full_matrices=0)
+        sp = s/s.sum()
+        eigelb = sp[getelbow(sp)]
+
+        #ipdb.set_trace()
+
+        spdif = np.abs(sp[1:]-sp[:-1])
+        spdifh = spdif[spdif.shape[0]/2:]
+        spdmin = spdif.min()
+        spdthr = np.mean([spdifh.max(),spdmin])
+        spmin = sp[(spdif.shape[0]/2)+(np.arange(spdifh.shape[0])[spdifh>=spdthr][0])+1]
+        spcum = []
+        spcumv = 0
+        for sss in sp:
+            spcumv+=sss
+            spcum.append(spcumv)
+        spcum = np.array(spcum)
+
+        #Compute K and Rho for PCA comps
+
+        #ipdb.set_trace()
+
+        eimum = np.atleast_2d(eim)
+        eimum = np.transpose(eimum,np.argsort(np.atleast_2d(eim).shape)[::-1])
+        eimum = np.array(np.squeeze(unmask(eimum.prod(1),mask)),dtype=np.bool)
+        vTmix = v.T
+        vTmixN = ((vTmix.T-vTmix.T.mean(0))/vTmix.T.std(0)).T
+        #ctb,KRd,betasv,v_T = fitmodels2(catd,v.T,eimum,t2s,tes,mmixN=vTmixN)
+        none,ctb,betasv,v_T = fitmodels_direct(catd,v.T,eimum,t2s,tes,mmixN=vTmixN,full_sel=False)
+        ctb = ctb[ctb[:,0].argsort(),:]
+        ctb = np.vstack([ctb.T[0:3],sp]).T
+
+        #Save state
+        print ("Saving PCA")
+        pcastate = {'u':u,'s':s,'v':v,'ctb':ctb,'eigelb':eigelb,'spmin':spmin,'spcum':spcum}
+        pcastate_f = bz2.BZ2File('pcastate.pklbz','wb')
+        pickle.dump(pcastate,pcastate_f)
+        pcastate_f.close()
+
+    else:
+        print ("Loading PCA")
+        pcastate_f = bz2.BZ2File('pcastate.pklbz','rb')
+        pcastate = pickle.load(pcastate_f)
+        for key,val in pcastate.items(): exec(key + '=val')
+
+    np.savetxt('comp_table_pca.txt',ctb[ctb[:,1].argsort(),:][::-1])
+    np.savetxt('mepca_mix.1D',v[ctb[:,1].argsort()[::-1],:].T)
+
+    kappas = ctb[ctb[:,1].argsort(),1]
+    rhos = ctb[ctb[:,2].argsort(),2]
+    fmin,fmid,fmax = getfbounds(ne)
+    kappa_thr = np.average(sorted([fmin,kappas[getelbow(kappas)]/2,fmid]),weights=[kdaw,1,1])
+    rho_thr = np.average(sorted([fmin,rhos[getelbow(rhos)]/2,fmid]),weights=[rdaw,1,1])
+    if int(kdaw)==-1:
+        kappas_lim = kappas[andb([kappas<F_MAX,kappas>fmin])==2]
+        kappa_thr = kappas_lim[getelbow(kappas_lim)]
+        rhos_lim = rhos[andb([rhos<F_MAX,rhos>fmin])==2]
+        rho_thr = rhos_lim[getelbow(rhos_lim)]
+        options.stabilize=True
+    if int(kdaw)!=-1 and int(rdaw)==-1:
+        rhos_lim = rhos[andb([rhos<F_MAX,rhos>fmin])==2]
+        rho_thr = rhos_lim[getelbow(rhos_lim)]
+    if options.stabilize:
+        pcscore = (np.array(ctb[:,1]>kappa_thr,dtype=np.int)+np.array(ctb[:,2]>rho_thr,dtype=np.int)+np.array(ctb[:,3]>eigelb,dtype=np.int))*np.array(ctb[:,3]>spmin,dtype=np.int)*np.array(spcum<0.95,dtype=np.int)*np.array(ctb[:,2]>fmin,dtype=np.int)*np.array(ctb[:,1]>fmin,dtype=np.int)*np.array(ctb[:,1]!=F_MAX,dtype=np.int)*np.array(ctb[:,2]!=F_MAX,dtype=np.int)
+    else:
+        pcscore = (np.array(ctb[:,1]>kappa_thr,dtype=np.int)+np.array(ctb[:,2]>rho_thr,dtype=np.int)+np.array(ctb[:,3]>eigelb,dtype=np.int))*np.array(ctb[:,3]>spmin,dtype=np.int)*np.array(ctb[:,1]!=F_MAX,dtype=np.int)*np.array(ctb[:,2]!=F_MAX,dtype=np.int)
+    pcsel = pcscore > 0
+    pcrej = np.array(pcscore==0,dtype=np.int)*np.array(ctb[:,3]>spmin,dtype=np.int) > 0
+
+    #ipdb.set_trace()
+
+    dd = u.dot(np.diag(s*np.array(pcsel,dtype=np.int))).dot(v)
+    nc = s[pcsel].shape[0]
+    print (pcsel)
+    print ("--Selected %i components. Minimum Kappa=%0.2f Rho=%0.2f" % (nc,kappa_thr,rho_thr))
+
+    dd = ((dd.T-dd.T.mean(0))/dd.T.std(0)).T  #Variance normalize timeseries
+    dd = (dd-dd.mean())/dd.std()              #Variance normalize everything
+
+    return nc,dd
+
+def tedica(dd,cost):
+    """
+    Input is dimensionally reduced spatially concatenated multi-echo time series dataset from tedpca()
+    Output is comptable, mmix, smaps from ICA, and betas from fitting catd to mmix
+    """
+    #Do ICA
+    climit = float("%s" % options.conv)
+    #icanode = mdp.nodes.FastICANode(white_comp=nc, white_parm={'svd':True},approach='symm', g=cost, fine_g=options.finalcost, limit=climit, verbose=True)
+    icanode = mdp.nodes.FastICANode(white_comp=nc,approach='symm', g=cost, fine_g=options.finalcost, coarse_limit=climit*100, limit=climit, verbose=True)
+    icanode.train(dd)
+    smaps = icanode.execute(dd)
+    mmix = icanode.get_recmatrix().T
+    mmix = (mmix-mmix.mean(0))/mmix.std(0)
+    return mmix
+
+def write_split_ts(data,comptable,mmix,suffix=''):
+    mdata = fmask(data,mask)
+    betas = fmask(get_coeffs(unmask((mdata.T-mdata.T.mean(0)).T,mask),mask,mmix),mask)
+    dmdata = mdata.T-mdata.T.mean(0)
+    varexpl = (1-((dmdata.T-betas.dot(mmix.T))**2.).sum()/(dmdata**2.).sum())*100
+    print ('Variance explained: ', varexpl , '%')
+    print (midk)
+    print (type(midk))
+    midkts = betas[:,midk].dot(mmix.T[midk,:])
+    lowkts = betas[:,rej].dot(mmix.T[rej,:])
+    if len(acc)!=0:
+        niwrite(unmask(betas[:,acc].dot(mmix.T[acc,:]),mask),aff,'_'.join(['hik_ts',suffix])+'.nii')
+    if len(midk)!=0:
+        niwrite(unmask(midkts,mask),aff,'_'.join(['midk_ts',suffix])+'.nii')
+    if len(rej)!=0:
+        niwrite(unmask(lowkts,mask),aff,'_'.join(['lowk_ts',suffix])+'.nii')
+    niwrite(unmask(fmask(data,mask)-lowkts-midkts,mask),aff,'_'.join(['dn_ts',suffix])+'.nii')
+    return varexpl
+
+def split_ts(data,comptable,mmix):
+    cbetas = get_coeffs(data-data.mean(-1)[:,:,:,np.newaxis],mask,mmix)
+    betas = fmask(cbetas,mask)
+    if len(acc)!=0:
+        hikts=unmask(betas[:,acc].dot(mmix.T[acc,:]),mask)
+    else:
+        hikts = None
+    return hikts,data-hikts
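+
+# Note (an addition): tedica() above relies on the MDP library's FastICANode
+# (imported as mdp in __main__); nc, the number of retained PCA components,
+# is read from the enclosing scope set by tedpca().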
+
+def writefeats(cbetas,comptable,mmix,suffix=''):
+    #Write signal changes (dS)
+    niwrite(cbetas[:,:,:,:],aff,'_'.join(['betas',suffix])+'.nii')
+    niwrite(cbetas[:,:,:,acc],aff,'_'.join(['betas_hik',suffix])+'.nii')
+    #Compute features (dS/S)
+    if options.e2d==None: e2d=int(np.floor(ne/2)+1)
+    edm = fmask(catd[:,:,:,e2d-1,:],mask)
+    edms = edm/edm.std(-1)[:,np.newaxis]
+    edms[edm<1]=0
+    hik,noise = split_ts(unmask(edms,mask),comptable,mmix)
+    noise = noise-noise.mean(-1)[:,:,:,np.newaxis]
+
+    zfac = 1./(mmix.shape[0]-len(acc)-1)*(noise**2).sum(-1)  #noise scaling
+    niwrite(zfac,aff,'zfac.nii')
+
+    cbetam = fmask(cbetas[:,:,:,acc],mask)
+    cbetam = (cbetam-cbetam.mean(0))/cbetam.std(0)
+    cbetam = cbetam/fmask(zfac,mask)[:,np.newaxis]
+    cbetam[edm.mean(-1)<1,:] = 0
+
+    niwrite(unmask(cbetam,mask),aff,'_'.join(['feats',suffix])+'.nii')
+
+def computefeats2(data,mmix,mask,normalize=True):
+    #Write feature versions of components
+    data = data[mask]
+    data_vn = (data-data.mean(axis=-1)[:,np.newaxis])/data.std(axis=-1)[:,np.newaxis]
+    data_R = get_coeffs(unmask(data_vn,mask),mask,mmix)[mask]
+    data_R[data_R<-.999] = -0.999
+    data_R[data_R>.999] = .999
+    data_Z = np.arctanh(data_R)
+    if normalize:
+        #data_Z2 = ((data_Z.T-data_Z.mean(0)[:,np.newaxis])/data_Z.std(0)[:,np.newaxis]).T
+        data_Z = (((data_Z.T-data_Z.mean(0)[:,np.newaxis])/data_Z.std(0)[:,np.newaxis]) + (data_Z.mean(0)/data_Z.std(0))[:,np.newaxis]).T
+    return data_Z
+
+def writefeats2(data,mmix,mask,suffix=''):
+    #Write feature versions of components
+    feats = computefeats2(data,mmix,mask)
+    niwrite(unmask(feats,mask),aff,'_'.join(['feats',suffix])+'.nii')
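+
+# Note (an addition): computefeats2() above converts per-component betas to
+# correlations, clips them to (-0.999, 0.999), and applies the Fisher
+# transform z = arctanh(r) so component maps live on a comparable z scale.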
+
+def writect(comptable,ctname='',varexpl='-1',classarr=[]):
+    global acc,rej,midk,empty
+    if len(classarr)!=0:
+        acc,rej,midk,empty = classarr
+    nc = comptable.shape[0]
+    ts = time.time()
+    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
+    sortab = comptable[comptable[:,1].argsort()[::-1],:]
+    if ctname=='': ctname = 'comp_table.txt'
+    open('accepted.txt','w').write(','.join([str(int(cc)) for cc in acc]))
+    open('rejected.txt','w').write(','.join([str(int(cc)) for cc in rej]))
+    open('midk_rejected.txt','w').write(','.join([str(int(cc)) for cc in midk]))
+    with open(ctname,'w') as f:
+        f.write("#\n#ME-ICA Component statistics table for: %s \n#Run on %s \n#\n" % (os.path.abspath(os.path.curdir),st) )
+        f.write("#Dataset variance explained by ICA (VEx): %.02f \n" % ( varexpl ) )
+        f.write("#Total components generated by decomposition (TCo): %i \n" % ( nc ) )
+        f.write("#No. accepted BOLD-like components, i.e. effective degrees of freedom for correlation (lower bound; DFe): %i\n" % ( len(acc) ) )
+        f.write("#Total number of rejected components (RJn): %i\n" % (len(midk)+len(rej)) )
+        f.write("#Nominal degrees of freedom in denoised time series (..._medn.nii.gz; DFn): %i \n" % (nt-len(midk)-len(rej)) )
+        f.write("#ACC %s \t#Accepted BOLD-like components\n" % ','.join([str(int(cc)) for cc in acc]) )
+        f.write("#REJ %s \t#Rejected non-BOLD components\n" % ','.join([str(int(cc)) for cc in rej]) )
+        f.write("#MID %s \t#Rejected R2*-weighted artifacts\n" % ','.join([str(int(cc)) for cc in midk]) )
+        f.write("#IGN %s \t#Ignored components (kept in denoised time series)\n" % ','.join([str(int(cc)) for cc in empty]) )
+        f.write("#VEx  TCo   DFe   RJn   DFn   \n")
+        f.write("##%.02f  %i %i %i %i \n" % (varexpl,nc,len(acc),len(midk)+len(rej),nt-len(midk)-len(rej)))
+        f.write("#   comp    Kappa    Rho    %%Var    %%Var(norm) \n")
+        for i in range(nc):
+            f.write('%d\t%f\t%f\t%.2f\t%.2f\n'%(sortab[i,0],sortab[i,1],sortab[i,2],sortab[i,3],sortab[i,4]))
+
+def writeresults():
+    print ("++ Writing optimally combined time series")
+    ts = optcom(catd,t2s,tes,mask)
+    niwrite(ts,aff,'ts_OC.nii')
+    print ("++ Writing Kappa-filtered optimally combined timeseries")
+    varexpl = write_split_ts(ts,comptable,mmix,'OC')
+    print ("++ Writing signal versions of components")
+    ts_B = get_coeffs(ts,mask,mmix)
+    niwrite(ts_B[:,:,:,:],aff,'_'.join(['betas','OC'])+'.nii')
+    if len(acc)!=0:
+        niwrite(ts_B[:,:,:,acc],aff,'_'.join(['betas_hik','OC'])+'.nii')
+        print ("++ Writing optimally combined high-Kappa features")
+        writefeats2(split_ts(ts,comptable,mmix)[0],mmix[:,acc],mask,'OC2')
+    print ("++ Writing component table")
+    writect(comptable,'comp_table.txt',varexpl)
+    if options.e2d!=None:
+        options.e2d=int(options.e2d)
+        print ("++ Writing Kappa-filtered TE#%i timeseries" % (options.e2d))
+        write_split_ts(catd[:,:,:,options.e2d-1,:],comptable,mmix,'e%i' % options.e2d)
+        print ("++ Writing high-Kappa TE#%i features" % (options.e2d))
+        writefeats(betas[:,:,:,options.e2d-1,:],comptable,mmix,'e%i' % options.e2d)
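+
+# Example invocation (an addition; the file names are hypothetical):
+#   python tedana_v25.py -d zcat_ffd.nii.gz -e 15.4,29.7,44.0 \
+#          --mask mask.nii.gz --kdaw 10 --label run01
+# which writes its outputs (ts_OC.nii, dn_ts_OC.nii, comp_table.txt, ...)
+# into a TED.run01 subdirectory.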
Default middle",default=None) + parser.add_option('',"--initcost",dest='initcost',help="Initial cost func. for ICA: pow3,tanh(default),gaus,skew",default='tanh') + parser.add_option('',"--finalcost",dest='finalcost',help="Final cost func, same opts. as initial",default='tanh') + parser.add_option('',"--stabilize",dest='stabilize',action='store_true',help="Stabilize convergence by reducing dimensionality, for low quality data",default=False) + parser.add_option('',"--fout",dest='fout',help="Output TE-dependence Kappa/Rho SPMs",action="store_true",default=False) + parser.add_option('',"--label",dest='label',help="Label for output directory.",default=None) + + (options,args) = parser.parse_args() + + print ("-- ME-PCA/ME-ICA Component for ME-ICA %s--" % __version__) + + if options.tes==None or options.data==None: + print ("*+ Need at least data and TEs, use -h for help.") + sys.exit() + + print ("++ Loading Data") + tes = np.fromstring(options.tes,sep=',',dtype=np.float32) + ne = tes.shape[0] + catim = nib.load(options.data) + head = catim.get_header() + head.extensions = [] + head.set_sform(head.get_sform(),code=1) + aff = catim.get_affine() + catd = cat2echos(catim.get_data(),ne) + nx,ny,nz,Ne,nt = catd.shape + mu = catd.mean(axis=-1) + sig = catd.std(axis=-1) + + """Parse options, prepare output directory""" + if options.fout: options.fout = aff + else: options.fout=None + kdaw = float(options.kdaw) + rdaw = float(options.rdaw) + if options.label!=None: dirname='%s' % '.'.join(['TED',options.label]) + else: dirname='TED' + os.system('mkdir %s' % dirname) + if options.mixm!=None: + try: + os.system('cp %s %s/meica_mix.1D; cp %s %s/%s' % (options.mixm,dirname,options.mixm,dirname,os.path.basename(options.mixm))) + except: + pass + os.chdir(dirname) + + print ("++ Computing Mask") + if options.mask_path != None: + mask_nib = nib.load(options.mask_path) + mask = mask_nib.get_data() + mask = mask.astype(np.bool) + print ("++ Dimensions of User Provided Mask: %d,%d,%d" % (np.shape(mask))) + type(mask) + else: + mask = makemask(catd) + print ("++ Dimensions of Newly Created Mask: %d,%d,%d" % np.shape(mask)) + type(mask) + niwrite(mask,aff,'mask.nii') + print ("++ Computing T2* map") + t2s,s0,t2s_fit = t2smap(catd,mask,tes) + #Condition values + cap_t2s = scoreatpercentile(t2s.flatten(),99.5) + t2s[t2s>cap_t2s*10]=cap_t2s + niwrite(s0,aff,'s0v.nii') + niwrite(t2s,aff,'t2sv.nii') + niwrite(t2s_fit,aff,'t2sF.nii') + + if options.mixm == None: + print ("++ Doing ME-PCA and ME-ICA with MDP") + import mdp + nc,dd = tedpca(options.ste) + mmix_orig = tedica(dd,cost=options.initcost) + np.savetxt('__meica_mix.1D',mmix_orig) + seldict,comptable,betas,mmix = fitmodels_direct(catd,mmix_orig,mask,t2s,tes,options.fout,reindex=True) + np.savetxt('meica_mix.1D',mmix) + acc,rej,midk,empty = selcomps(seldict,knobargs=args) + del dd + else: + mmix_orig = np.loadtxt('meica_mix.1D') + eim = eimask(np.float64(fmask(catd,mask)))==1 + eimum = np.array(np.squeeze(unmask(np.array(eim,dtype=np.int).prod(1),mask)),dtype=np.bool) + seldict,comptable,betas,mmix = fitmodels_direct(catd,mmix_orig,mask,t2s,tes,options.fout) + acc,rej,midk,empty = selcomps(seldict,knobargs=args) + + if len(acc)==0: + print ("\n** WARNING! No BOLD components detected!!! 
Please check data and results!\n")
+
+    writeresults()
diff --git a/rsfMRI/tedana_v32.py b/rsfMRI/tedana_v32.py
new file mode 100755
index 0000000..3227a14
--- /dev/null
+++ b/rsfMRI/tedana_v32.py
@@ -0,0 +1,1052 @@
+#!/usr/bin/env python
+__version__="v3.2 beta1"
+welcome_block="""
+# Multi-Echo ICA, Version %s
+#
+# Kundu, P., Brenowitz, N.D., Voon, V., Worbe, Y., Vertes, P.E., Inati, S.J., Saad, Z.S.,
+# Bandettini, P.A. & Bullmore, E.T. Integrated strategy for improving functional
+# connectivity mapping using multiecho fMRI. PNAS (2013).
+#
+# Kundu, P., Inati, S.J., Evans, J.W., Luh, W.M. & Bandettini, P.A. Differentiating
+# BOLD and non-BOLD signals in fMRI time series using multi-echo EPI. NeuroImage (2011).
+# http://dx.doi.org/10.1016/j.neuroimage.2011.12.028
+#
+# PROCEDURE 2 : Computes ME-PCA and ME-ICA
+# -Computes T2* map
+# -Computes PCA of concatenated ME data, then computes TE-dependence of PCs
+# -Computes ICA of TE-dependence PCs
+# -Identifies TE-dependent ICs, outputs high-\kappa (BOLD) component
+#   and denoised time series
+# -or- Computes TE-dependence of each component of a general linear model
+#   specified by input (includes MELODIC FastICA mixing matrix)
+""" % (__version__)
+
+import os
+import sys
+from optparse import OptionParser
+import numpy as np
+import nibabel as nib
+from sys import stdout,argv
+import scipy.stats as stats
+import time
+import datetime
+if __name__=='__main__':
+    selfuncfile='%s/select_model_fft20e.py' % os.path.dirname(argv[0])
+    execfile(selfuncfile)
+import cPickle as pickle
+import gzip
+import pdb
+# The wavelet helpers (dwtmat/idwtmat) further below require PyWavelets;
+# import it lazily so the rest of the pipeline still runs without it.
+try:
+    import pywt
+except ImportError:
+    pywt = None
+
+if 'DEBUG' in argv:
+    import ipdb
+    debug_mode = True
+else: debug_mode = False
+
+F_MAX=500
+Z_MAX = 8
+
+def _interpolate(a, b, fraction):
+    """Returns the point at the given fraction between a and b, where
+    'fraction' must be between 0 and 1.
+    """
+    return a + (b - a)*fraction
+
+def scoreatpercentile(a, per, limit=(), interpolation_method='lower'):
+    """
+    This function is grabbed from scipy
+
+    """
+    values = np.sort(a, axis=0)
+    if limit:
+        values = values[(limit[0] <= values) & (values <= limit[1])]
+
+    idx = per /100. * (values.shape[0] - 1)
+    if (idx % 1 == 0):
+        score = values[int(idx)]
+    else:
+        if interpolation_method == 'fraction':
+            score = _interpolate(values[int(idx)], values[int(idx) + 1],
+                                 idx % 1)
+        elif interpolation_method == 'lower':
+            score = values[int(np.floor(idx))]
+        elif interpolation_method == 'higher':
+            score = values[int(np.ceil(idx))]
+        else:
+            raise ValueError("interpolation_method can only be 'fraction', " \
+                             "'lower' or 'higher'")
+    return score
+
+def MAD(data, axis=None):
+    return np.median(np.abs(data - np.median(data, axis)), axis)
+
+def dice(A,B):
+    denom = np.array(A!=0,dtype=np.int).sum(0)+(np.array(B!=0,dtype=np.int).sum(0))
+    if denom!=0:
+        AB_un = andb([A!=0,B!=0])==2
+        numer = np.array(AB_un,dtype=np.int).sum(0)
+        return 2.*numer/denom
+    else:
+        return 0.
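+# Illustrative doctest for dice() (not used by the pipeline itself): two
+# binary patterns with 2 nonzero voxels each and 1 voxel of overlap give
+# 2*|A&B|/(|A|+|B|) = 2*1/(2+2) = 0.5:
+#   >>> dice(np.array([1,1,0,0]), np.array([1,0,1,0]))
+#   0.5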
+
+def spatclust(data,mask,csize,thr,header,aff,infile=None,dindex=0,tindex=0):
+    if infile==None:
+        data = data.copy()
+        data[data<thr] = 0
+        niwrite(unmask(data,mask),aff,'__clin.nii.gz',header)
+        infile='__clin.nii.gz'
+        addopts=""
+    if data is not None and len(np.squeeze(data).shape)>1 and dindex+tindex==0: addopts="-doall"
+    else: addopts="-1dindex %s -1tindex %s" % (str(dindex),str(tindex))
+    os.system('3dmerge -overwrite %s -dxyz=1 -1clust 1 %i -1thresh %.02f -prefix __clout.nii.gz %s' % (addopts,int(csize),float(thr),infile))
+    clustered = fmask(nib.load('__clout.nii.gz').get_data(),mask)!=0
+    return clustered
+
+def rankvec(vals):
+    asort = np.argsort(vals)
+    ranks = np.zeros(vals.shape[0])
+    ranks[asort]=np.arange(vals.shape[0])+1
+    return ranks
+
+def niwrite(data,affine, name, header=None):
+    stdout.write(" + Writing file: %s ...." % name)
+
+    thishead = header
+    if thishead == None:
+        thishead = head.copy()
+        thishead.set_data_shape(list(data.shape))
+
+    outni = nib.Nifti1Image(data,affine,header=thishead)
+    outni.to_filename(name)
+    print 'done.'
+
+def cat2echos(data,Ne):
+    """
+    cat2echos(data,Ne)
+
+    Input:
+    data shape is (nx,ny,Ne*nz,nt)
+    """
+    nx,ny = data.shape[0:2]
+    nz = data.shape[2]/Ne
+    if len(data.shape) >3:
+        nt = data.shape[3]
+    else:
+        nt = 1
+    return np.reshape(data,(nx,ny,nz,Ne,nt),order='F')
+
+def uncat2echos(data,Ne):
+    """
+    uncat2echos(data,Ne)
+
+    Input:
+    data shape is (nx,ny,Ne,nz,nt)
+    """
+    nx,ny = data.shape[0:2]
+    nz = data.shape[2]*Ne
+    if len(data.shape) >4:
+        nt = data.shape[4]
+    else:
+        nt = 1
+    return np.reshape(data,(nx,ny,nz,nt),order='F')
+
+def makemask(cdat):
+
+    nx,ny,nz,Ne,nt = cdat.shape
+
+    mask = np.ones((nx,ny,nz),dtype=np.bool)
+
+    for i in range(Ne):
+        tmpmask = (cdat[:,:,:,i,:] != 0).prod(axis=-1,dtype=np.bool)
+        mask = mask & tmpmask
+
+    return mask
+
+def makeadmask(cdat,min=True,getsum=False):
+
+    nx,ny,nz,Ne,nt = cdat.shape
+
+    mask = np.ones((nx,ny,nz),dtype=np.bool)
+
+    if min:
+        mask = cdat[:,:,:,:,:].prod(axis=-1).prod(-1)!=0
+        return mask
+    else:
+        #Make a map of longest echo that a voxel can be sampled with,
+        #with minimum value of map as X value of voxel that has median
+        #value in the 1st echo. N.b. larger factor leads to bias to lower TEs
+        emeans = cdat[:,:,:,:,:].mean(-1)
+        medv = emeans[:,:,:,0] == stats.scoreatpercentile(emeans[:,:,:,0][emeans[:,:,:,0]!=0],33,interpolation_method='higher')
+        lthrs = np.squeeze(np.array([ emeans[:,:,:,ee][medv]/3 for ee in range(Ne) ]))
+        if len(lthrs.shape)==1: lthrs = np.atleast_2d(lthrs).T
+        lthrs = lthrs[:,lthrs.sum(0).argmax()]
+        mthr = np.ones([nx,ny,nz,Ne])  # local Ne (not the unrelated global ne)
+        for ee in range(Ne): mthr[:,:,:,ee]*=lthrs[ee]
+        mthr = np.abs(emeans[:,:,:,:])>mthr
+        masksum = np.array(mthr,dtype=np.int).sum(-1)
+        mask = masksum!=0
+        if getsum: return mask,masksum
+        else: return mask
+
+def fmask(data,mask):
+    """
+    fmask(data,mask)
+
+    Input:
+    data shape is (nx,ny,nz,...)
+    mask shape is (nx,ny,nz)
+
+    Output:
+    out shape is (Nm,...)
+ """ + + s = data.shape + sm = mask.shape + + N = s[0]*s[1]*s[2] + news = [] + news.append(N) + + if len(s) >3: + news.extend(s[3:]) + + tmp1 = np.reshape(data,news) + fdata = tmp1.compress((mask > 0 ).ravel(),axis=0) + + return fdata.squeeze() + +def unmask (data,mask): + """ + unmask (data,mask) + + Input: + + data has shape (Nm,nt) + mask has shape (nx,ny,nz) + + """ + M = (mask != 0).ravel() + Nm = M.sum() + + nx,ny,nz = mask.shape + + if len(data.shape) > 1: + nt = data.shape[1] + else: + nt = 1 + + out = np.zeros((nx*ny*nz,nt),dtype=data.dtype) + out[M,:] = np.reshape(data,(Nm,nt)) + + return np.squeeze(np.reshape(out,(nx,ny,nz,nt))) + +def t2smap(catd,mask,tes): + """ + t2smap(catd,mask,tes) + + Input: + + catd has shape (nx,ny,nz,Ne,nt) + mask has shape (nx,ny,nz) + tes is a 1d numpy array + """ + nx,ny,nz,Ne,nt = catd.shape + N = nx*ny*nz + + echodata = fmask(catd,mask) + Nm = echodata.shape[0] + + #Do Log Linear fit + B = np.reshape(np.abs(echodata[:,:ne])+1, (Nm,(ne)*nt)).transpose() + B = np.log(B) + x = np.array([np.ones(Ne),-tes]) + X = np.tile(x,(1,nt)) + X = np.sort(X)[:,::-1].transpose() + + beta,res,rank,sing = np.linalg.lstsq(X,B) + t2s = 1/beta[1,:].transpose() + s0 = np.exp(beta[0,:]).transpose() + + #Goodness of fit + alpha = (np.abs(B)**2).sum(axis=0) + t2s_fit = blah = (alpha - res)/(2*res) + + out = np.squeeze(unmask(t2s,mask)),np.squeeze(unmask(s0,mask)),unmask(t2s_fit,mask) + + return out + +def t2sadmap(catd,mask,tes): + """ + t2smap(catd,mask,tes) + + Input: + + catd has shape (nx,ny,nz,Ne,nt) + mask has shape (nx,ny,nz) + tes is a 1d numpy array + """ + nx,ny,nz,Ne,nt = catd.shape + N = nx*ny*nz + + echodata = fmask(catd,mask) + Nm = echodata.shape[0] + + t2ss = np.zeros([nx,ny,nz,Ne-1]) + s0vs = np.zeros([nx,ny,nz,Ne-1]) + + for ne in range(1,Ne+1): + + #Do Log Linear fit + B = np.reshape(np.abs(echodata[:,:ne])+1, (Nm,(ne)*nt)).transpose() + B = np.log(B) + x = np.array([np.ones(ne),-tes[:ne] ]) + X = np.tile(x,(1,nt)) + X = np.sort(X)[:,::-1].transpose() + + beta,res,rank,sing = np.linalg.lstsq(X,B) + t2s = 1/beta[1,:].transpose() + s0 = np.exp(beta[0,:]).transpose() + + t2s[np.isinf(t2s)] = 500. + s0[np.isnan(s0)] = 0. 
+ + t2ss[:,:,:,ne-2] = np.squeeze(unmask(t2s,mask)) + s0vs[:,:,:,ne-2] = np.squeeze(unmask(s0,mask)) + + #Limited T2* and S0 maps + fl = np.zeros([nx,ny,nz,len(tes)-2+1]) + for ne in range(Ne-1): + fl_ = np.squeeze(fl[:,:,:,ne]) + fl_[masksum==ne+2] = True + fl[:,:,:,ne] = fl_ + fl = np.array(fl,dtype=bool) + t2sa = np.squeeze(unmask(t2ss[fl],masksum>1)) + s0va = np.squeeze(unmask(s0vs[fl],masksum>1)) + + #Full T2* maps with S0 estimation errors + t2saf = t2sa.copy() + s0vaf = s0va.copy() + t2saf[masksum==1] = t2ss[masksum==1,0] + s0vaf[masksum==1] = s0vs[masksum==1,0] + + return t2sa,s0va,t2ss,s0vs,t2saf,s0vaf + +def get_coeffs(data,mask,X,add_const=False): + """ + get_coeffs(data,X) + + Input: + + data has shape (nx,ny,nz,nt) + mask has shape (nx,ny,nz) + X has shape (nt,nc) + + Output: + + out has shape (nx,ny,nz,nc) + """ + mdata = fmask(data,mask).transpose() + + X=np.atleast_2d(X) + if X.shape[0]==1: X=X.T + Xones = np.atleast_2d(np.ones(np.min(mdata.shape))).T + if add_const: X = np.hstack([X,Xones]) + + tmpbetas = np.linalg.lstsq(X,mdata)[0].transpose() + if add_const: tmpbetas = tmpbetas[:,:-1] + out = unmask(tmpbetas,mask) + + return out + + +def andb(arrs): + result = np.zeros(arrs[0].shape) + for aa in arrs: result+=np.array(aa,dtype=np.int) + return result + + +def optcom(data,t2s,tes,mask,useG=True): + """ + out = optcom(data,t2s) + + + Input: + + data.shape = (nx,ny,nz,Ne,Nt) + t2s.shape = (nx,ny,nz) + tes.shape = (Ne,) + + Output: + + out.shape = (nx,ny,nz,Nt) + """ + nx,ny,nz,Ne,Nt = data.shape + + if useG: + fdat = fmask(data,mask) + ft2s = fmask(t2sG,mask) + + else: + fdat = fmask(data,mask) + ft2s = fmask(t2s,mask) + + tes = tes[np.newaxis,:] + ft2s = ft2s[:,np.newaxis] + + if options.combmode == 'ste': + alpha = fdat.mean(-1)*tes + else: + alpha = tes * np.exp(-tes /ft2s) + + alpha = np.tile(alpha[:,:,np.newaxis],(1,1,Nt)) + + fout = np.average(fdat,axis = 1,weights=alpha) + out = unmask(fout,mask) + print 'Out shape is ', out.shape + return out + +def getelbow(ks,val=False): + #Elbow using linear projection method - moderate + ks = np.sort(ks)[::-1] + nc = ks.shape[0] + coords = np.array([np.arange(nc),ks]) + p = coords - np.tile(np.reshape(coords[:,0],(2,1)),(1,nc)) + b = p[:,-1] + b_hat = np.reshape(b/np.sqrt((b**2).sum()),(2,1)) + proj_p_b = p - np.dot(b_hat.T,p)*np.tile(b_hat,(1,nc)) + d = np.sqrt((proj_p_b**2).sum(axis=0)) + k_min_ind = d.argmax() + k_min = ks[k_min_ind] + if val: return ks[k_min_ind] + else: return k_min_ind + +def getelbow2(ks,val=False): + #Elbow using mean/variance method - conservative + ks = np.sort(ks)[::-1] + nk = len(ks) + ds = np.array([ (ks[nk-5-ii-1]>ks[nk-5-ii:nk].mean()+2*ks[nk-5-ii:nk].std()) for ii in range(nk-5) ][::-1],dtype=np.int) + dsum = [] + c_ = 0 + for d_ in ds: + c_=(c_+d_)*d_ + dsum.append(c_) + e2=np.argmax(np.array(dsum)) + elind = np.max([getelbow(ks),e2]) + if val: return ks[elind] + else: return elind + +def getelbow3(ks,val=False): + #Elbow using curvature - aggressive + ks = np.sort(ks)[::-1] + dKdt = ks[:-1]-ks[1:] + dKdt2 = dKdt[:-1]-dKdt[1:] + curv = np.abs((dKdt2/(1+dKdt[:-1]**2.)**(3./2.))) + curv[np.isnan(curv)]=-1*10**6 + maxcurv = np.argmax(curv)+2 + if val: return(ks[maxcurv]) + else:return maxcurv + +def getfbounds(ne): + F05s=[None,None,18.5,10.1,7.7,6.6,6.0,5.6,5.3,5.1,5.0] + F025s=[None,None,38.5,17.4,12.2,10,8.8,8.1,7.6,7.2,6.9] + F01s=[None,None,98.5,34.1,21.2,16.2,13.8,12.2,11.3,10.7,10.] 
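+    # Tabulated F critical values at p=0.05, 0.025 and 0.01, indexed by the
+    # number of echoes. Illustrative doctest:
+    #   >>> getfbounds(3)
+    #   (18.5, 38.5, 98.5)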
+    return F05s[ne-1],F025s[ne-1],F01s[ne-1]
+
+def eimask(dd,ees=None):
+    if ees==None: ees=range(dd.shape[1])
+    imask = np.zeros([dd.shape[0],len(ees)])
+    for ee in ees:
+        print ee
+        lthr = 0.001*scoreatpercentile(dd[:,ee,:].flatten(),98)
+        hthr = 5*scoreatpercentile(dd[:,ee,:].flatten(),98)
+        print lthr,hthr
+        imask[dd[:,ee,:].mean(1) > lthr,ee]=1
+        imask[dd[:,ee,:].mean(1) > hthr,ee]=0
+    return imask
+
+# NOTE: the wavelet helpers dwtmat/idwtmat are defined once, further below
+# (near dwtcatd).
+
+def tedpca(ste=0,mlepca=True):
+    nx,ny,nz,ne,nt = catd.shape
+    ste = np.array([int(ee) for ee in str(ste).split(',')])
+    cAl = None
+    if len(ste) == 1 and ste[0]==-1:
+        print "-Computing PCA of optimally combined multi-echo data"
+        OCmask = makemask(OCcatd[:,:,:,np.newaxis,:])
+        d = fmask(OCcatd,OCmask)
+        eim = eimask(d[:,np.newaxis,:])
+        eim = eim[:,0]==1
+        d = d[eim,:]
+    elif len(ste) == 1 and ste[0]==0:
+        print "-Computing PCA of spatially concatenated multi-echo data"
+        ste = np.arange(ne)
+        d = np.float64(fmask(catd,mask))
+        eim = eimask(d)==1
+        d = d[eim]
+    else:
+        print "-Computing PCA of TE #%s" % ','.join([str(ee) for ee in ste])
+        d = np.float64(np.concatenate([fmask(catd[:,:,:,ee,:],mask)[:,np.newaxis,:] for ee in ste-1],axis=1))
+        eim = eimask(d)==1
+        eim = np.squeeze(eim)
+        d = np.squeeze(d[eim])
+
+    dz = ((d.T-d.T.mean(0))/d.T.std(0)).T  #Variance normalize timeseries
+    dz = (dz-dz.mean())/dz.std()  #Variance normalize everything
+
+    #if wvpca:
+    #    print "++Transforming time series from time domain to wavelet domain."
+    #    dz,cAl = dwtmat(dz)
+
+    pcastate_fn = 'pcastate.pklgz'
+
+    if not os.path.exists(pcastate_fn):
+        ##Do PC dimension selection
+        #Get eigenvalue cutoff
+
+        if mlepca:
+            from sklearn.decomposition import PCA
+            ppca = PCA(n_components='mle', svd_solver='full')
+            ppca.fit(dz)
+            v = ppca.components_
+            s = ppca.explained_variance_
+            u = np.dot(np.dot(dz,v.T),np.diag(1./s))
+        else:
+            u,s,v = np.linalg.svd(dz,full_matrices=0)
+
+        sp = s/s.sum()
+        eigelb = sp[getelbow(sp)]
+
+        spdif = np.abs(sp[1:]-sp[:-1])
+        spdifh = spdif[spdif.shape[0]/2:]
+        spdmin = spdif.min()
+        spdthr = np.mean([spdifh.max(),spdmin])
+        spmin = sp[(spdif.shape[0]/2)+(np.arange(spdifh.shape[0])[spdifh>=spdthr][0])+1]
+        spcum = []
+        spcumv = 0
+        for sss in sp:
+            spcumv+=sss
+            spcum.append(spcumv)
+        spcum = np.array(spcum)
+
+        #Compute K and Rho for PCA comps
+        eimum = np.atleast_2d(eim)
+        eimum = np.transpose(eimum,np.argsort(np.atleast_2d(eim).shape)[::-1])
+        eimum = np.array(np.squeeze(unmask(eimum.prod(1),mask)),dtype=np.bool)
+        vTmix = v.T
+        vTmixN = ((vTmix.T-vTmix.T.mean(0))/vTmix.T.std(0)).T
+        #ctb,KRd,betasv,v_T = fitmodels2(catd,v.T,eimum,t2s,tes,mmixN=vTmixN)
+        none,ctb,betasv,v_T = fitmodels_direct(catd,v.T,eimum,t2s,tes,mmixN=vTmixN,full_sel=False)
+        ctb = ctb[ctb[:,0].argsort(),:]
+        ctb = np.vstack([ctb.T[0:3],sp]).T
+
+        #Save state
+        print "Saving PCA"
+        pcastate = {'u':u,'s':s,'v':v,'ctb':ctb,'eigelb':eigelb,'spmin':spmin,'spcum':spcum}
+        try:
+            pcastate_f = gzip.open(pcastate_fn,'wb')
+            pickle.dump(pcastate,pcastate_f)
+            pcastate_f.close()
+        except:
+            print "Could not save PCA solution!"
+
+    else:
+        print "Loading PCA"
+        pcastate_f = gzip.open(pcastate_fn,'rb')
+        pcastate = pickle.load(pcastate_f)
+        for key,val in pcastate.items(): exec(key + '=val')
+
+    np.savetxt('comp_table_pca.txt',ctb[ctb[:,1].argsort(),:][::-1])
+    np.savetxt('mepca_mix.1D',v[ctb[:,1].argsort()[::-1],:].T)
+
+    kappas = ctb[ctb[:,1].argsort(),1]
+    rhos = ctb[ctb[:,2].argsort(),2]
+    fmin,fmid,fmax = getfbounds(ne)
+    kappa_thr = np.average(sorted([fmin,getelbow(kappas,True)/2,fmid]),weights=[kdaw,1,1])
+    rho_thr = np.average(sorted([fmin,getelbow2(rhos,True)/2,fmid]),weights=[rdaw,1,1])
+    if int(kdaw)==-1:
+        kappas_lim = kappas[andb([kappas<F_MAX,kappas>fmin])==2]
+        kappa_thr = kappas_lim[getelbow(kappas_lim)]
+        rhos_lim = rhos[andb([rhos<F_MAX,rhos>fmin])==2]
+        rho_thr = rhos_lim[getelbow(rhos_lim)]
+        options.stabilize=True
+    if int(kdaw)!=-1 and int(rdaw)==-1:
+        rhos_lim = rhos[andb([rhos<F_MAX,rhos>fmin])==2]
+        rho_thr = rhos_lim[getelbow(rhos_lim)]
+    if options.stabilize:
+        pcscore = (np.array(ctb[:,1]>kappa_thr,dtype=np.int)+np.array(ctb[:,2]>rho_thr,dtype=np.int)+np.array(ctb[:,3]>eigelb,dtype=np.int))*np.array(ctb[:,3]>spmin,dtype=np.int)*np.array(spcum<0.95,dtype=np.int)*np.array(ctb[:,2]>fmin,dtype=np.int)*np.array(ctb[:,1]>fmin,dtype=np.int)*np.array(ctb[:,1]!=F_MAX,dtype=np.int)*np.array(ctb[:,2]!=F_MAX,dtype=np.int)
+    else:
+        pcscore = (np.array(ctb[:,1]>kappa_thr,dtype=np.int)+np.array(ctb[:,2]>rho_thr,dtype=np.int)+np.array(ctb[:,3]>eigelb,dtype=np.int))*np.array(ctb[:,3]>spmin,dtype=np.int)*np.array(ctb[:,1]!=F_MAX,dtype=np.int)*np.array(ctb[:,2]!=F_MAX,dtype=np.int)
+    pcsel = pcscore > 0
+    pcrej = np.array(pcscore==0,dtype=np.int)*np.array(ctb[:,3]>spmin,dtype=np.int) > 0
+
+    """
+kdaw=15
+rdaw=5
+kappa_thr = np.average(sorted([fmin,getelbow(kappas,True)/2,fmid]),weights=[kdaw,1,1])
+rho_thr = np.average(sorted([fmin,getelbow2(rhos,True)/2,fmid]),weights=[rdaw,1,1])
+print kappa_thr, rho_thr
+pcscore = (np.array(ctb[:,1]>kappa_thr,dtype=np.int)+np.array(ctb[:,2]>rho_thr,dtype=np.int)+np.array(ctb[:,3]>eigelb,dtype=np.int))*np.array(ctb[:,3]>spmin,dtype=np.int)*np.array(ctb[:,1]!=F_MAX,dtype=np.int)*np.array(ctb[:,2]!=F_MAX,dtype=np.int)
+np.array(pcscore>0).sum()
+    """
+
+    if 'DEBUG' in sys.argv:
+        import ipdb
+        ipdb.set_trace()
+
+    dd = u.dot(np.diag(s*np.array(pcsel,dtype=np.int))).dot(v)
+
+    #if wvpca:
+    #    print "++Transforming PCA solution from wavelet domain to time domain"
+    #    dd = idwtmat(dd,cAl)
+
+    nc = s[pcsel].shape[0]
+    print pcsel
+    print "--Selected %i components. Minimum Kappa=%0.2f Rho=%0.2f" % (nc,kappa_thr,rho_thr)
+
+    dd = ((dd.T-dd.T.mean(0))/dd.T.std(0)).T  #Variance normalize timeseries
+    dd = (dd-dd.mean())/dd.std()  #Variance normalize everything
+
+    return nc,dd
+
+def tedica(dd,cost):
+    """
+    Input is the dimensionally reduced, spatially concatenated multi-echo time series dataset from tedpca()
+    Output is the normalized ICA mixing matrix (mmix)
+    """
+    #Do ICA
+    climit = float("%s" % options.conv)
+    #icanode = mdp.nodes.FastICANode(white_comp=nc, white_parm={'svd':True},approach='symm', g=cost, fine_g=options.finalcost, limit=climit, verbose=True)
+    icanode = mdp.nodes.FastICANode(white_comp=nc,approach='symm', g=cost, fine_g=options.finalcost, primary_limit=climit*100, limit=climit, verbose=True)
+    icanode.train(dd)
+    smaps = icanode.execute(dd)
+    mmix = icanode.get_recmatrix().T
+    mmix = (mmix-mmix.mean(0))/mmix.std(0)
+    return mmix
+
+def write_split_ts(data,comptable,mmix,suffix=''):
+    mdata = fmask(data,mask)
+    betas = fmask(get_coeffs(unmask((mdata.T-mdata.T.mean(0)).T,mask),mask,mmix),mask)
+    dmdata = mdata.T-mdata.T.mean(0)
+    varexpl = (1-((dmdata.T-betas.dot(mmix.T))**2.).sum()/(dmdata**2.).sum())*100
+    print 'Variance explained: ', varexpl, '%'
+    midkts = betas[:,midk].dot(mmix.T[midk,:])
+    lowkts = betas[:,rej].dot(mmix.T[rej,:])
+    if len(acc)!=0:
+        niwrite(unmask(betas[:,acc].dot(mmix.T[acc,:]),mask),aff,'_'.join(['hik_ts',suffix])+'.nii')
+    if len(midk)!=0:
+        niwrite(unmask(midkts,mask),aff,'_'.join(['midk_ts',suffix])+'.nii')
+    if len(rej)!=0:
+        niwrite(unmask(lowkts,mask),aff,'_'.join(['lowk_ts',suffix])+'.nii')
+    niwrite(unmask(fmask(data,mask)-lowkts-midkts,mask),aff,'_'.join(['dn_ts',suffix])+'.nii')
+    return varexpl
+
+def split_ts(data,comptable,mmix):
+    cbetas = get_coeffs(data-data.mean(-1)[:,:,:,np.newaxis],mask,mmix)
+    betas = fmask(cbetas,mask)
+    if len(acc)!=0:
+        hikts=unmask(betas[:,acc].dot(mmix.T[acc,:]),mask)
+    else:
+        # No accepted components: there is no high-Kappa signal to subtract,
+        # so return the data unchanged rather than crashing on None.
+        return None,data
+    return hikts,data-hikts
+
+def writefeats(cbetas,comptable,mmix,suffix=''):
+    #Write signal changes (dS)
+    niwrite(cbetas[:,:,:,:],aff,'_'.join(['betas',suffix])+'.nii')
+    niwrite(cbetas[:,:,:,acc],aff,'_'.join(['betas_hik',suffix])+'.nii')
+    #Compute features (dS/S)
+    #This script has no --denoiseTE option, so fall back to the middle echo;
+    #getattr keeps this working whether or not options.e2d exists.
+    e2d = getattr(options,'e2d',None)
+    if e2d==None: e2d = np.floor(ne/2)+1
+    e2d = int(e2d)
+    edm = fmask(catd[:,:,:,e2d-1,:],mask)
+    edms = edm/edm.std(-1)[:,np.newaxis]
+    edms[edm<1]=0
+    hik,noise = split_ts(unmask(edms,mask),comptable,mmix)
+    noise = noise-noise.mean(-1)[:,:,:,np.newaxis]
+
+    zfac = 1./(mmix.shape[0]-len(acc)-1)*(noise**2).sum(-1)  #noise scaling
+    niwrite(zfac,aff,'zfac.nii')
+
+    cbetam = fmask(cbetas[:,:,:,acc],mask)
+    cbetam = (cbetam-cbetam.mean(0))/cbetam.std(0)
+    cbetam = cbetam/fmask(zfac,mask)[:,np.newaxis]
+    cbetam[edm.mean(-1)<1,:] = 0
+
+    niwrite(unmask(cbetam,mask),aff,'_'.join(['feats',suffix])+'.nii')
+
+def computefeats2(data,mmix,mask,normalize=True):
+    #Write feature versions of components
+    data = data[mask]
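+    # What follows is a per-component correlation map, Fisher z-transformed:
+    # z = arctanh(r). Clipping r to +/-0.999 first bounds |z| at
+    # arctanh(0.999) ~= 3.8, so single voxels cannot dominate the normalized
+    # feature maps.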
+    data_vn = (data-data.mean(axis=-1)[:,np.newaxis])/data.std(axis=-1)[:,np.newaxis]
+    data_R = get_coeffs(unmask(data_vn,mask),mask,mmix)[mask]
+    data_R[data_R<-.999] = -0.999
+    data_R[data_R>.999] = .999
+    data_Z = np.arctanh(data_R)
+    if len(data_Z.shape)==1: data_Z = np.atleast_2d(data_Z).T
+    if normalize:
+        #data_Z2 = ((data_Z.T-data_Z.mean(0)[:,np.newaxis])/data_Z.std(0)[:,np.newaxis]).T
+        data_Z = (((data_Z.T-data_Z.mean(0)[:,np.newaxis])/data_Z.std(0)[:,np.newaxis]) + (data_Z.mean(0)/data_Z.std(0))[:,np.newaxis]).T
+    return data_Z
+
+def writefeats2(data,mmix,mask,suffix=''):
+    #Write feature versions of components
+    feats = computefeats2(data,mmix,mask)
+    niwrite(unmask(feats,mask),aff,'_'.join(['feats',suffix])+'.nii')
+
+def writect(comptable,ctname='',varexpl=-1,classarr=[]):
+    global acc,rej,midk,empty
+    if len(classarr)!=0:
+        acc,rej,midk,empty = classarr
+    nc = comptable.shape[0]
+    ts = time.time()
+    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
+    sortab = comptable[comptable[:,1].argsort()[::-1],:]
+    if ctname=='': ctname = 'comp_table.txt'
+    open('accepted.txt','w').write(','.join([str(int(cc)) for cc in acc]))
+    open('rejected.txt','w').write(','.join([str(int(cc)) for cc in rej]))
+    open('midk_rejected.txt','w').write(','.join([str(int(cc)) for cc in midk]))
+    with open(ctname,'w') as f:
+        f.write("#\n#ME-ICA Component statistics table for: %s \n#Run on %s \n#\n" % (os.path.abspath(os.path.curdir),st) )
+        f.write("#Dataset variance explained by ICA (VEx): %.02f \n" % ( varexpl ) )
+        f.write("#Total components generated by decomposition (TCo): %i \n" % ( nc ) )
+        f.write("#No. accepted BOLD-like components, i.e. effective degrees of freedom for correlation (lower bound; DFe): %i\n" % ( len(acc) ) )
+        f.write("#Total number of rejected components (RJn): %i\n" % (len(midk)+len(rej)) )
+        f.write("#Nominal degrees of freedom in denoised time series (..._medn.nii.gz; DFn): %i \n" % (nt-len(midk)-len(rej)) )
+        f.write("#ACC %s \t#Accepted BOLD-like components\n" % ','.join([str(int(cc)) for cc in acc]) )
+        f.write("#REJ %s \t#Rejected non-BOLD components\n" % ','.join([str(int(cc)) for cc in rej]) )
+        f.write("#MID %s \t#Rejected R2*-weighted artifacts\n" % ','.join([str(int(cc)) for cc in midk]) )
+        f.write("#IGN %s \t#Ignored components (kept in denoised time series)\n" % ','.join([str(int(cc)) for cc in empty]) )
+        f.write("#VEx TCo DFe RJn DFn \n")
+        f.write("##%.02f %i %i %i %i \n" % (varexpl,nc,len(acc),len(midk)+len(rej),nt-len(midk)-len(rej)))
+        f.write("# comp Kappa Rho %Var %Var(norm) \n")
+        for i in range(nc):
+            f.write('%d\t%f\t%f\t%.2f\t%.2f\n'%(sortab[i,0],sortab[i,1],sortab[i,2],sortab[i,3],sortab[i,4]))
+
+def gscontrol_raw(dtrank=4):
+    """
+    This function uses the spatial global signal estimation approach to modify catd (a global
+    variable) so that the global signal is removed from each individual echo's time series.
+    The spatial global signal is estimated from the optimally combined data after detrending
+    with a Legendre polynomial basis of order=0 and degree=dtrank.
+ """ + + print "++ Applying amplitude-based T1 equilibration correction" + + #Legendre polynomial basis for denoising + from scipy.special import lpmv + Lmix = np.array([lpmv(0,vv,np.linspace(-1,1,OCcatd.shape[-1])) for vv in range(dtrank)]).T + + #Compute mean, std, mask local to this function - inefficient, but makes this function a bit more modular + Gmu = OCcatd.mean(-1) + Gstd = OCcatd.std(-1) + Gmask = Gmu!=0 + + #Find spatial global signal + dat = OCcatd[Gmask] - Gmu[Gmask][:,np.newaxis] + sol = np.linalg.lstsq(Lmix,dat.T) #Legendre basis for detrending + detr = dat - np.dot(sol[0].T,Lmix.T)[0] + sphis = (detr).min(1) + sphis -= sphis.mean() + niwrite(unmask(sphis,Gmask),aff,'T1gs.nii',head) + + #Find time course of the spatial global signal, make basis with the Legendre basis + glsig = np.linalg.lstsq(np.atleast_2d(sphis).T,dat)[0] + glsig = (glsig-glsig.mean() ) / glsig.std() + np.savetxt('glsig.1D',glsig) + glbase = np.hstack([Lmix,glsig.T]) + + #Project global signal out of optimally combined data + sol = np.linalg.lstsq(np.atleast_2d(glbase),dat.T) + tsoc_nogs = dat - np.dot(np.atleast_2d(sol[0][dtrank]).T,np.atleast_2d(glbase.T[dtrank])) + Gmu[Gmask][:,np.newaxis] + + global OCcatd, sphis_raw + sphis_raw = sphis + niwrite(OCcatd,aff,'tsoc_orig.nii',head) + OCcatd = unmask(tsoc_nogs,Gmask) + niwrite(OCcatd,aff,'tsoc_nogs.nii',head) + + #Project glbase out of each echo + for ii in range(Ne): + dat = catd[:,:,:,ii,:][Gmask] + sol = np.linalg.lstsq(np.atleast_2d(glbase),dat.T) + e_nogs = dat - np.dot(np.atleast_2d(sol[0][dtrank]).T,np.atleast_2d(glbase.T[dtrank])) + catd[:,:,:,ii,:] = unmask(e_nogs,Gmask) + if 'DEBUG' in sys.argv: + ipdb.set_trace() + +def gscontrol_mmix(): + + Gmu = OCcatd.mean(-1) + Gstd = OCcatd.std(-1) + Gmask = Gmu!=0 + + """ + Compute temporal regression + """ + dat = (OCcatd[Gmask] - Gmu[Gmask][:,np.newaxis]) / Gstd[mask][:,np.newaxis] + solG = np.linalg.lstsq(mmix,dat.T) + resid = dat - np.dot(solG[0].T,mmix.T) + + """ + Build BOLD time series without amplitudes, and save T1-like effect + """ + bold_ts = np.dot(solG[0].T[:,acc],mmix[:,acc].T) + sphis = bold_ts.min(-1) + sphis -= sphis.mean() + print sphis.shape + niwrite(unmask(sphis,mask),aff,'sphis_hik.nii',header=head) + + """ + Find the global signal based on the T1-like effect + """ + sol = np.linalg.lstsq(np.atleast_2d(sphis).T,dat) + glsig = sol[0] + + """ + T1 correct time series by regression + """ + bold_noT1gs = bold_ts - np.dot(np.linalg.lstsq(glsig.T,bold_ts.T)[0].T,glsig) + niwrite(unmask(bold_noT1gs*Gstd[mask][:,np.newaxis],mask),aff,'hik_ts_OC_T1c.nii',header=head) + + """ + Make medn version of T1 corrected time series + """ + niwrite(Gmu[:,:,:,np.newaxis]+unmask((bold_noT1gs+resid)*Gstd[mask][:,np.newaxis] ,mask),aff,'dn_ts_OC_T1c.nii',header=head) + + """ + Orthogonalize mixing matrix w.r.t. 
T1-GS
+    """
+    mmixnogs = mmix.T - np.dot(np.linalg.lstsq(glsig.T,mmix)[0].T,glsig)
+    mmixnogs_mu = mmixnogs.mean(-1)
+    mmixnogs_std = mmixnogs.std(-1)
+    mmixnogs_norm = (mmixnogs-mmixnogs_mu[:,np.newaxis])/mmixnogs_std[:,np.newaxis]
+    mmixnogs_norm = np.vstack([np.atleast_2d(np.ones(max(glsig.shape))),glsig,mmixnogs_norm])
+
+    """
+    Write T1-GS corrected components and mixing matrix
+    """
+    sol = np.linalg.lstsq(mmixnogs_norm.T,dat.T)
+    niwrite(unmask(sol[0].T[:,2:],Gmask),aff,'betas_hik_OC_T1c.nii',header=head)
+    np.savetxt('meica_mix_T1c.1D',mmixnogs)
+
+
+def dwtmat(mmix):
+    if 'DEBUG' in sys.argv:
+        import ipdb
+        ipdb.set_trace()
+    print "++Wavelet transforming data"
+    llt = len(np.hstack(pywt.dwt(mmix[0],'db2')))
+    mmix_wt = np.zeros([mmix.shape[0],llt])
+    for ii in range(mmix_wt.shape[0]):
+        wtx = pywt.dwt(mmix[ii],'db2')
+        cAlen = len(wtx[0])
+        mmix_wt[ii] = np.hstack(wtx)
+    return mmix_wt,cAlen
+
+def idwtmat(mmix_wt,cAl):
+    print "++Inverse wavelet transforming"
+    lt = len(pywt.idwt(mmix_wt[0,:cAl],mmix_wt[0,cAl:],'db2',correct_size=True))
+    mmix_iwt = np.zeros([mmix_wt.shape[0],lt])
+    for ii in range(mmix_iwt.shape[0]):
+        mmix_iwt[ii] = pywt.idwt(mmix_wt[ii,:cAl],mmix_wt[ii,cAl:],'db2',correct_size=True)
+    return mmix_iwt
+
+def dwtcatd():
+    #Experimental and currently unused: wavelet-transform the masked,
+    #echo-stacked data.
+    if 'DEBUG' in sys.argv:
+        import ipdb
+        ipdb.set_trace()
+    stackmask = np.tile(mask,[1,1,ne])
+    maskin = uncat2echos(catd,ne)[stackmask]
+    trans_mask_catd,newlen = dwtmat(uncat2echos(catd,ne)[stackmask])
+    newcatd = unmask(trans_mask_catd,stackmask)
+    return newcatd
+
+def writeresults():
+    print "++ Writing optimally combined time series"
+    ts = OCcatd
+    niwrite(ts,aff,'ts_OC.nii')
+    print "++ Writing Kappa-filtered optimally combined timeseries"
+    varexpl = write_split_ts(ts,comptable,mmix,'OC')
+    print "++ Writing signal versions of components"
+    ts_B = get_coeffs(ts,mask,mmix)
+    niwrite(ts_B[:,:,:,:],aff,'_'.join(['betas','OC'])+'.nii')
+    if len(acc)!=0:
+        niwrite(ts_B[:,:,:,acc],aff,'_'.join(['betas_hik','OC'])+'.nii')
+        print "++ Writing optimally combined high-Kappa features"
+        writefeats2(split_ts(ts,comptable,mmix)[0],mmix[:,acc],mask,'OC2')
+    print "++ Writing component table"
+    writect(comptable,'comp_table.txt',varexpl)
+
+def writeresults_echoes():
+    for ii in range(ne):
+        print "++ Writing Kappa-filtered TE#%i timeseries" % (ii+1)
+        write_split_ts(catd[:,:,:,ii,:],comptable,mmix,'e%i' % (ii+1))
+
+def ctabsel(ctabfile):
+    ctlines = open(ctabfile).readlines()
+    class_tags = ['#ACC','#REJ','#MID','#IGN']
+    class_dict = {}
+    for ii,ll in enumerate(ctlines):
+        for kk in class_tags:
+            if ll[:4]==kk and ll[4:].strip() != '':
+                class_dict[kk] = ll[4:].split('#')[0].split(',')
+    return tuple([np.array(class_dict[kk],dtype=int) for kk in class_tags])
+
+
+###################################################################################################
+# Begin Main
+###################################################################################################
+
+if __name__=='__main__':
+
+    parser=OptionParser()
+    parser.add_option('-d',"--orig_data",dest='data',help="Spatially Concatenated Multi-Echo Dataset",default=None)
+    parser.add_option('-e',"--TEs",dest='tes',help="Echo times (in ms) ex: 15,39,63",default=None)
+    parser.add_option('',"--mix",dest='mixm',help="Mixing matrix. If not provided, ME-PCA & ME-ICA (MDP) is done.",default=None)
+    parser.add_option('',"--ctab",dest='ctab',help="Component table to extract pre-computed classifications from.",default=None)
+    parser.add_option('',"--manacc",dest='manacc',help="Comma separated list of manually accepted components",default=None)
+    parser.add_option('',"--strict",dest='strict',action='store_true',help="Ignore low-variance ambiguous components",default=False)
+    #parser.add_option('',"--wav",dest='wav',help="Perform wavelet PCA, default False",default=False)
+    parser.add_option('',"--no_gscontrol",dest='no_gscontrol',action='store_true',help="Disable the spatial global signal correction (gscontrol_raw)",default=False)
+    parser.add_option('',"--kdaw",dest='kdaw',help="Dimensionality augmentation weight (Kappa). Default 10. -1 for low-dimensional ICA",default=10.)
+    parser.add_option('',"--rdaw",dest='rdaw',help="Dimensionality augmentation weight (Rho). Default 1. -1 for low-dimensional ICA",default=1.)
+    parser.add_option('',"--conv",dest='conv',help="Convergence limit. Default 2.5e-5",default='2.5e-5')
+    parser.add_option('',"--sourceTEs",dest='ste',help="Source TEs for models. ex: -ste 2,3 ; -ste 0 for all, -1 for opt. com. Default -1.",default=-1)
+    parser.add_option('',"--combmode",dest='combmode',help="Combination scheme for TEs: t2s (Posse 1999, default), ste (Poser)",default='t2s')
+    parser.add_option('',"--denoiseTEs",dest='dne',action='store_true',help="Denoise each TE dataset separately",default=False)
+    parser.add_option('',"--initcost",dest='initcost',help="Initial cost func. for ICA: pow3,tanh(default),gaus,skew",default='tanh')
+    parser.add_option('',"--finalcost",dest='finalcost',help="Final cost func, same opts. as initial",default='tanh')
+    parser.add_option('',"--stabilize",dest='stabilize',action='store_true',help="Stabilize convergence by reducing dimensionality, for low quality data",default=False)
+    parser.add_option('',"--fout",dest='fout',help="Output TE-dependence Kappa/Rho SPMs",action="store_true",default=False)
+    parser.add_option('',"--filecsdata",dest='filecsdata',help="Save component selection data",action="store_true",default=False)
+    parser.add_option('',"--label",dest='label',help="Label for output directory.",default=None)
+
+    (options,args) = parser.parse_args()
+    args = list(set(args) - set(['DEBUG']))
+
+    print "-- ME-PCA/ME-ICA Component for ME-ICA %s --" % __version__
+
+    if options.tes==None or options.data==None:
+        print "*+ Need at least data and TEs, use -h for help."
+        sys.exit()
+
+    print "++ Loading Data"
+    tes = np.fromstring(options.tes,sep=',',dtype=np.float32)
+    ne = tes.shape[0]
+    catim = nib.load(options.data)
+    head = catim.get_header()
+    head.extensions = []
+    head.set_sform(head.get_sform(),code=1)
+    aff = catim.get_affine()
+    catd = cat2echos(catim.get_data(),ne)
+    nx,ny,nz,Ne,nt = catd.shape
+    mu = catd.mean(axis=-1)
+    sig = catd.std(axis=-1)
+
+    """Parse options, prepare output directory"""
+    if options.fout: options.fout = aff
+    else: options.fout=None
+    kdaw = float(options.kdaw)
+    rdaw = float(options.rdaw)
+    if options.label!=None: dirname='%s' % '.'.join(['TED',options.label])
+    else: dirname='TED'
+    os.system('mkdir %s' % dirname)
+    if options.mixm!=None:
+        try:
+            os.system('cp %s %s/meica_mix.1D; cp %s %s/%s' % (options.mixm,dirname,options.mixm,dirname,os.path.basename(options.mixm)))
+            print('++ JAVIER: cp %s %s/meica_mix.1D; cp %s %s/%s' % (options.mixm,dirname,options.mixm,dirname,os.path.basename(options.mixm)))
+        except: pass
+    if options.ctab!=None:
+        try:
+            os.system('cp %s %s/comp_table.txt; cp %s %s/%s' % (options.ctab,dirname,options.ctab,dirname,os.path.basename(options.ctab)))
+            print('++ JAVIER: cp %s %s/comp_table.txt; cp %s %s/%s' % (options.ctab,dirname,options.ctab,dirname,os.path.basename(options.ctab)))
+        except: pass
+
+    os.chdir(dirname)
+
+    print "++ Computing Mask"
+    mask,masksum = makeadmask(catd,min=False,getsum=True)
+
+    print "++ Computing T2* map"
+    t2s,s0,t2ss,s0s,t2sG,s0G = t2sadmap(catd,mask,tes)
+
+    #Condition values
+    cap_t2s = scoreatpercentile(t2s.flatten(),99.5)
+    t2s[t2s>cap_t2s*10]=cap_t2s
+    niwrite(s0,aff,'s0v.nii')
+    niwrite(t2s,aff,'t2sv.nii')
+    niwrite(t2ss,aff,'t2ss.nii')
+    niwrite(s0s,aff,'s0vs.nii')
+    niwrite(s0G,aff,'s0vG.nii')
+    niwrite(t2sG,aff,'t2svG.nii')
+
+    #Optimally combine data
+    OCcatd = optcom(catd,t2s,tes,mask)
+
+    if not options.no_gscontrol:
+        gscontrol_raw()
+
+    if options.mixm == None:
+        print "++ Doing ME-PCA and ME-ICA with MDP"
+        import mdp
+        if 'DEBUG' in sys.argv:
+            import ipdb
+            ipdb.set_trace()
+
+        nc,dd = tedpca(options.ste)
+        mmix_orig = tedica(dd,cost=options.initcost)
+        np.savetxt('__meica_mix.1D',mmix_orig)
+        seldict,comptable,betas,mmix = fitmodels_direct(catd,mmix_orig,mask,t2s,tes,options.fout,reindex=True)
+        np.savetxt('meica_mix.1D',mmix)
+        if 'GROUP0' in sys.argv:
+            group0_flag = True
+        else: group0_flag = False
+        acc,rej,midk,empty = selcomps(seldict,knobargs=args,debug=debug_mode,group0_only=group0_flag,strict_mode=options.strict)
+        del dd
+    else:
+        print "++ Using Components provided by user"
+        mmix_orig = np.loadtxt('meica_mix.1D')
+        print "++ INFO: mmix_orig.shape = %s" % str(mmix_orig.shape)
+        print "++ INFO: mask.shape %s" % str(mask.shape)
+        print "++ INFO: catd.shape %s" % str(catd.shape)
+        eim = eimask(np.float64(fmask(catd,mask)))==1
+        print "++ INFO: eim.shape %s" % str(eim.shape)
+        eimum = np.array(np.squeeze(unmask(np.array(eim,dtype=np.int).prod(1),mask)),dtype=np.bool)
+        seldict,comptable,betas,mmix = fitmodels_direct(catd,mmix_orig,mask,t2s,tes,options.fout)
+        if options.ctab == None:
+            print "++ Creating Comp Table"
+            acc,rej,midk,empty = selcomps(seldict,knobargs=args,debug=debug_mode,strict_mode=options.strict)
+        else:
+            print "++ Using User provided Comp Table"
+            acc,rej,midk,empty = ctabsel(options.ctab)
+            print "++ INFO: ACC=%s" % str(acc)
+            print "++ INFO: REJ=%s" % str(rej)
+            print "++ INFO: MIDK=%s" % str(midk)
+            print "++ INFO: IGN=%s" % str(empty)
+
+    if len(acc)==0:
+        print "\n** WARNING! No BOLD components detected!!! Please check data and results!\n"
+
+    writeresults()
+    gscontrol_mmix()
+    if options.dne: writeresults_echoes()
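+# ---------------------------------------------------------------------------
+# Example invocation (illustrative only; the input file name and echo times
+# below are assumptions, not part of this repository):
+#
+#   python2 tedana_v32.py -d zcat_ds.nii.gz -e 15.4,29.7,44.0 \
+#       --kdaw 10 --rdaw 1 --label demo
+#
+# This would create TED.demo/ containing ts_OC.nii (optimally combined data),
+# dn_ts_OC.nii (denoised time series), meica_mix.1D and comp_table.txt.
+# ---------------------------------------------------------------------------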