Skip to content

Parallel #113

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 22 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
faea568
added parallel, testing and 3D testing
oliverchampion Jul 25, 2025
a0ad54a
implemented parallel + testing; 3D + testing
oliverchampion Jul 29, 2025
bc69159
fixed bugs
oliverchampion Jul 29, 2025
3e446fe
last updates
oliverchampion Jul 29, 2025
ee96f11
typo
oliverchampion Jul 29, 2025
0c1067e
Update conftest.py
oliverchampion Jul 29, 2025
cace5b1
updated all typo's and errors. passes tests locally now
oliverchampion Jul 30, 2025
23e711c
SLS TRF removed
oliverchampion Jul 30, 2025
bdcd9b9
download data in right location
oliverchampion Jul 30, 2025
076b776
Merge branch 'main' into parallel
oliverchampion Jul 30, 2025
fdc74db
Update conftest.py
oliverchampion Jul 30, 2025
1e3cecd
Merge branch 'parallel' of https://github.com/OSIPI/TF2.4_IVIM-MRI_Co…
oliverchampion Jul 30, 2025
d64fd6e
Update test_ivim_fit.py
oliverchampion Jul 30, 2025
2f4174c
test on 2 cores (default avail on github) and with less strict time b…
oliverchampion Aug 1, 2025
aeee141
fix some of the failed tests and reduce output
oliverchampion Aug 4, 2025
270b95d
resolved some of the failing tests
oliverchampion Aug 4, 2025
ae2bd26
fix typos
oliverchampion Aug 4, 2025
127d966
remove hard fail on not making parallle faster. Just a warning now.
oliverchampion Aug 5, 2025
1c6d6f2
hope it does not trigger the AI fits. They are not triggered locally...
oliverchampion Aug 5, 2025
04a19d3
summarize warnings
oliverchampion Aug 8, 2025
1e9e2c6
Update unit_test.yml
oliverchampion Aug 8, 2025
bd265c4
Update unit_test.yml
oliverchampion Aug 8, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .github/workflows/unit_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,4 +33,5 @@ jobs:
- name: Test with pytest
run: |
pip install pytest pytest-cov
python -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html
python -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html -r w

1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
__pycache__/
*.nii.gz
*.nii
*.npy
*.dcm
*.mat
*.raw
Expand Down
41 changes: 37 additions & 4 deletions conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@
import json
import csv
# import datetime

import numpy as np
from phantoms.MR_XCAT_qMRI.sim_ivim_sig import phantom

def pytest_addoption(parser):
parser.addoption(
Expand Down Expand Up @@ -243,8 +244,8 @@ def algorithmlist(algorithmFile):
algorithms = algorithm_information["algorithms"]
for algorithm in algorithms:
algorithm_dict = algorithm_information.get(algorithm, {})
requires_matlab = algorithm_dict.get("requires_matlab", False)
yield algorithm, requires_matlab, algorithm_dict.get('deep_learning', False)
if not algorithm_dict.get('deep_learning', False):
yield algorithm, algorithm_dict

def bound_input(datafile,algorithmFile):
# Find the algorithms from algorithms.json
Expand Down Expand Up @@ -289,4 +290,36 @@ def deep_learning_algorithms(datafile,algorithmFile):
kwargs = algorithm_dict.get("options", {})
requires_matlab = algorithm_dict.get("requires_matlab", False)
tolerances = algorithm_dict.get("tolerances", {"atol":{"f": 2e-1, "D": 8e-4, "Dp": 8e-2},"rtol":{"f": 0.2, "D": 0.3, "Dp": 0.4}})
yield algorithm, all_data, bvals, kwargs, requires_matlab, tolerances
yield algorithm, all_data, bvals, kwargs, requires_matlab, tolerances


@pytest.fixture(scope="session")
def threeddata(request):
    """Session-scoped fixture yielding a downsampled 3D IVIM phantom.

    Reads the b-values from the JSON file passed via the ``dataFile``
    option, simulates a phantom signal with those b-values, and returns
    every 4th voxel in-plane (full resolution along the 3rd axis) to keep
    test runtime manageable.

    Returns:
        tuple: (signal, D map, f map, D* map, bvalues) — maps are
        presumably ground-truth parameter images from the simulator;
        confirm against ``phantom``'s return contract.
    """
    data_path = pathlib.Path.cwd() / request.config.getoption("dataFile")
    with data_path.open() as fp:
        payload = json.load(fp)
    # Only the acquisition config is needed here; the per-case signals are not.
    bvalues = np.array(payload.pop('config')['bvalues'])
    signal, _, d_map, f_map, dstar_map, _ = phantom(
        bvalues, 1 / 1000, TR=3000, TE=40,
        motion=False, rician=False, interleaved=False, T1T2=True,
    )
    spatial = (slice(None, None, 4), slice(None, None, 4), slice(None, None, 1))
    return (
        signal[spatial + (slice(None),)],
        d_map[spatial],
        f_map[spatial],
        dstar_map[spatial],
        bvalues,
    )


'''''@pytest.fixture
def pardat(request):
datafile = request.config.getoption("dataFile")
current_folder = pathlib.Path.cwd()
generic = current_folder / datafile
with generic.open() as f:
all_data = json.load(f)
bvals = all_data.pop('config')['bvalues']

data_list = []
for name, data in all_data.items():
signal = np.asarray(data["data"])
signal /= signal[0]
data_list.append(signal)

big_array = np.stack(data_list, axis=0)
return big_array, bvals'''''
12 changes: 8 additions & 4 deletions phantoms/MR_XCAT_qMRI/sim_ivim_sig.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,28 +5,33 @@
import argparse
import os
from utilities.data_simulation.Download_data import download_data
import pathlib

##########
# code written by Oliver J Gurney-Champion
# code adapted from MAtlab code by Eric Schrauben: https://github.com/schrau24/XCAT-ERIC
# This code generates a 4D IVIM phantom as nifti file

def phantom(bvalue, noise, TR=3000, TE=40, motion=False, rician=False, interleaved=False,T1T2=True):
download_data()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This downloads data during testing now, correct? Is that slow or burdensome?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes and no. The volume, parallel and AI tests just last long, full stop. Downloading the data is relatively quick compared to that less than minute Vs hours).

I think there are two ways we can go here.

I think I could accelerate the volume fit by dramatically reducing the number of voxels; there probably is no need for high resolution.

For the parallel fit, the smaller I make the number of voxels/volume, the less of an acceleration we get from going parallel and the harder it is to test this. For small volumes, typically linear goes way faster. That is why I ended up increasing the volume.

So either we can make a large volume (then the download time is negligible) and do long tests. Or, we can go for a small volume and ignore the parallel timing. In that case, precalculating the volume and saving it will be faster.

That said, the AI tests also last 5+ minutes per algorithm and we must decide whether we want that to be done every merge...

Either way, all building blocks are here now :)

np.random.seed(42)
if motion:
states = range(1,21)
else:
states = [1]
for state in states:
# Load the .mat file
mat_data = loadmat('../../download/Phantoms/XCAT_MAT_RESP/XCAT5D_RP_' + str(state) + '_CP_1.mat')
project_root = pathlib.Path(__file__).resolve().parent.parent
filename = f'XCAT5D_RP_{state}_CP_1.mat'
mat_path = project_root / '..' /'download' / 'Phantoms' / 'XCAT_MAT_RESP' / filename
mat_data = loadmat(mat_path)

# Access the variables in the loaded .mat file
XCAT = mat_data['IMG']
XCAT = XCAT[-1:0:-2,-1:0:-2,10:160:4]

D, f, Ds = contrast_curve_calc()
S, Dim, fim, Dpim, legend = XCAT_to_MR_DCE(XCAT, TR, TE, bvalue, D, f, Ds,T1T2=T1T2)
S, Dim, fim, Dpim, legend = XCAT_to_MR_IVIM(XCAT, TR, TE, bvalue, D, f, Ds,T1T2=T1T2)
if state == 1:
Dim_out = Dim
fim_out = fim
Expand Down Expand Up @@ -187,7 +192,7 @@ def contrast_curve_calc():
return D, f, Ds


def XCAT_to_MR_DCE(XCAT, TR, TE, bvalue, D, f, Ds, b0=3, ivim_cont = True, T1T2=True):
def XCAT_to_MR_IVIM(XCAT, TR, TE, bvalue, D, f, Ds, b0=3, ivim_cont = True, T1T2=True):
###########################################################################################
# This script converts XCAT tissue values to MR contrast based on the SSFP signal equation.
# Christopher W. Roy 2018-12-04 # [email protected]
Expand Down Expand Up @@ -436,7 +441,6 @@ def parse_bvalues_file(file_path):
motion = args.motion
interleaved = args.interleaved
T1T2 = args.T1T2
download_data()
for key, bvalue in bvalues.items():
bvalue = np.array(bvalue)
sig, XCAT, Dim, fim, Dpim, legend = phantom(bvalue, noise, motion=motion, interleaved=interleaved,T1T2=T1T2)
Expand Down
5 changes: 4 additions & 1 deletion pytest.ini
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,7 @@ markers =
slow: marks tests as slow (deselect with '-m "not slow"')
addopts =
-m 'not slow'
testpaths = tests
testpaths = tests
filterwarnings =
ignore::Warning
always::tests.IVIMmodels.unit_tests.test_ivim_fit.PerformanceWarning
11 changes: 9 additions & 2 deletions src/original/ETP_SRI/LinearFitting.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import numpy as np
import numpy.polynomial.polynomial as poly
import warnings

from utilities.data_simulation.GenerateData import GenerateData

Expand Down Expand Up @@ -82,11 +83,17 @@ def ivim_fit(self, bvalues, signal):
# print(Dp_prime)

if np.any(np.asarray(Dp_prime) < 0) or not np.all(np.isfinite(Dp_prime)):
print('Perfusion fit failed')
warnings.warn('Perfusion fit failed',
category=UserWarning,
stacklevel=2 # Ensures correct file/line info in the warning
)
Dp_prime = [0, 0]
f = signal[0] - D[0]
else:
print("This doesn't seem to be an IVIM set of b-values")
warnings.warn('This doesn\'t seem to be an IVIM set of b-values',
category=UserWarning,
stacklevel=2 # Ensures correct file/line info in the warning
)
f = 1
Dp_prime = [0, 0]
D = D[1]
Expand Down
10 changes: 7 additions & 3 deletions src/original/PvH_KB_NKI/DWI_functions_standalone.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@
"""

import numpy as np
import warnings


def generate_ADC_standalone(DWIdata, bvalues, bmin: int = 150, bmax: int = 1000, specificBvals: list = []):
"""
Expand Down Expand Up @@ -116,9 +118,11 @@ def generate_IVIMmaps_standalone(DWIdata, bvalues, bminADC=150, bmaxADC=1000, bm
raise Exception('b=0 was not acquired')

if len(bvalues) > 2:
print('More than two b-values were detected for D* calculation. ' +
'Note that only the first two will be used for D*.')

warnings.warn(
'More than two b-values were detected for D* calculation. ' + 'Note that only the first two will be used for D*.',
category=UserWarning,
stacklevel=2 # Ensures correct file/line info in the warning
)
except Exception as e:
print("Could not calculate D* due to an error: " + str(e))
return
Expand Down
1 change: 1 addition & 0 deletions src/original/TF_reference/segmented_IVIMfit.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ def segmented_IVIM_fit(bvalues, dw_data, b_cutoff = 200, bounds=([0.0001, 0.0, 0
"""
bvalues_D = bvalues[bvalues >= b_cutoff]
dw_data_D = dw_data[bvalues >= b_cutoff]
dw_data_D = np.clip(dw_data_D, 1e-6, None) # ensure we do not get 0 or negative values that cause nans/infs
log_data_D = np.log(dw_data_D)

D, b0_intercept = d_fit_iterative_wls(bvalues_D, log_data_D)
Expand Down
2 changes: 2 additions & 0 deletions src/standardized/ETP_SRI_LinearFitting.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import numpy as np
from src.wrappers.OsipiBase import OsipiBase
from src.original.ETP_SRI.LinearFitting import LinearFit
import warnings
warnings.simplefilter('once', UserWarning)


class ETP_SRI_LinearFitting(OsipiBase):
Expand Down
5 changes: 3 additions & 2 deletions src/standardized/IAR_LU_biexp.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,8 +114,9 @@ def ivim_fit_full_volume(self, signals, bvalues, **kwargs):
gtab = gradient_table(bvalues, bvec, b0_threshold=0)

self.IAR_algorithm = IvimModelBiExp(gtab, bounds=self.bounds, initial_guess=self.initial_guess)

fit_results = self.IAR_algorithm.fit(signals)
b0_index = np.where(bvalues == 0)[0][0]
mask = signals[...,b0_index]>0.01
fit_results = self.IAR_algorithm.fit(signals, mask=mask)

results = {}
results["f"] = fit_results.model_params[..., 1]
Expand Down
5 changes: 5 additions & 0 deletions src/standardized/IAR_LU_modified_mix.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class IAR_LU_modified_mix(OsipiBase):
supported_dimensions = 1
supported_priors = False


def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, weighting=None, stats=False):
"""
Everything this algorithm requires should be implemented here.
Expand All @@ -47,6 +48,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False

# Additional options
self.stochastic = True

# Check the inputs

# Initialize the algorithm
Expand Down
3 changes: 3 additions & 0 deletions src/standardized/IVIM_NEToptim.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,9 @@ def initialize(self, bounds, initial_guess, fitS0, traindata, SNR):
self.fitS0=fitS0
self.deep_learning = True
self.supervised = False
# Additional options
self.stochastic = True

if traindata is None:
warnings.warn('no training data provided (traindata = None). Training data will be simulated')
if SNR is None:
Expand Down
58 changes: 41 additions & 17 deletions src/standardized/OGC_AmsterdamUMC_Bayesian_biexp.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ def initialize(self, bounds=None, initial_guess=None, fitS0=True, prior_in=None,
self.neg_log_prior=flat_neg_log_prior([self.bounds[0][0],self.bounds[1][0]],[self.bounds[0][1],self.bounds[1][1]],[self.bounds[0][2],self.bounds[1][2]],[self.bounds[0][3],self.bounds[1][3]])
else:
print('warning, bounds are not used, as a prior is used instead')
if len(prior_in) is 4:
if len(prior_in) == 4:
self.neg_log_prior = empirical_neg_log_prior(prior_in[0], prior_in[1], prior_in[2],prior_in[3])
else:
self.neg_log_prior = empirical_neg_log_prior(prior_in[0], prior_in[1], prior_in[2])
Expand Down Expand Up @@ -113,37 +113,61 @@ def ivim_fit(self, signals, bvalues, initial_guess=None, **kwargs):

return results

def ivim_fit_full_volume(self, signals, bvalues, initial_guess=None, **kwargs):
def ivim_fit_full_volume(self, signals, bvalues, njobs=4, **kwargs):
"""Perform the IVIM fit

Args:
signals (array-like)
bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.

Returns:
_type_: _description_
"""
signals = self.reshape_to_voxelwise(signals)
# normalize signals
# Get index of b=0
shape=np.shape(signals)

b0_index = np.where(bvalues == 0)[0][0]
# Mask of voxels where signal at b=0 >= 0.5
valid_mask = signals[..., b0_index] >= 0.01
# Select only valid voxels for fitting
signals = signals[valid_mask]

minimum_bvalue = np.min(bvalues) # We normalize the signal to the minimum bvalue. Should be 0 or very close to 0.
b0_indices = np.where(bvalues == minimum_bvalue)[0]
normalization_factor = np.mean(signals[..., b0_indices],axis=-1)
signals = signals / np.repeat(normalization_factor[...,np.newaxis],np.shape(signals)[-1],-1)

bvalues=np.array(bvalues)

epsilon = 0.000001
fit_results = fit_segmented_array(bvalues, signals, bounds=self.bounds, cutoff=self.thresholds, p0=self.initial_guess)
fit_results=np.array(fit_results+(1,))
for i in range(4):
if fit_results[i] < self.bounds[0][i] : fit_results[0] = self.bounds[0][i]+epsilon
if fit_results[i] > self.bounds[1][i] : fit_results[0] = self.bounds[1][i]-epsilon
fit_results = self.OGC_algorithm_array(bvalues, signals, self.neg_log_prior, x0=fit_results, fitS0=self.fitS0, bounds=self.bounds)
fit_results = np.array(fit_segmented_array(bvalues, signals, bounds=self.bounds, cutoff=self.thresholds, p0=self.initial_guess))
#fit_results=np.array(fit_results+(1,))
# Loop over parameters (rows)

for i in range(4):
if i == 3:
fit_results[i] = np.random.normal(1,0.2,np.shape(fit_results[i]))
else:
below = fit_results[i] < self.bounds[0][i]
above = fit_results[i] > self.bounds[1][i]

fit_results[i, below] = self.bounds[0][i] + epsilon
fit_results[i, above] = self.bounds[1][i] - epsilon
self.jobs=njobs
fit_results = self.OGC_algorithm_array(bvalues, signals,fit_results, self)

D=np.zeros(shape[0:-1])
D[valid_mask]=fit_results[0]
f=np.zeros(shape[0:-1])
f[valid_mask]=fit_results[1]
Dp=np.zeros(shape[0:-1])
Dp[valid_mask]=fit_results[2]
results = {}
results["D"] = fit_results[0]
results["f"] = fit_results[1]
results["Dp"] = fit_results[2]

results["D"] = D
results["f"] = f
results["Dp"] = Dp
return results



def reshape_to_voxelwise(self, data):
"""
reshapes multi-D input (spatial dims, bvvalue) data to 2D voxel-wise array
Expand All @@ -154,4 +178,4 @@ def reshape_to_voxelwise(self, data):
"""
B = data.shape[-1]
voxels = int(np.prod(data.shape[:-1])) # e.g., X*Y*Z
return data.reshape(voxels, B)
return data.reshape(voxels, B)
21 changes: 0 additions & 21 deletions src/standardized/OGC_AmsterdamUMC_biexp.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,25 +84,4 @@ def ivim_fit(self, signals, bvalues, **kwargs):
results["f"] = fit_results[1]
results["Dp"] = fit_results[2]

return results

def ivim_fit_full_volume(self, signals, bvalues, **kwargs):
"""Perform the IVIM fit

Args:
signals (array-like)
bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.

Returns:
_type_: _description_
"""

bvalues=np.array(bvalues)
fit_results = self.OGC_algorithm_array(bvalues, signals, p0=self.initial_guess, bounds=self.bounds, fitS0=self.fitS0)

results = {}
results["D"] = fit_results[0]
results["f"] = fit_results[1]
results["Dp"] = fit_results[2]

return results
2 changes: 1 addition & 1 deletion src/standardized/OGC_AmsterdamUMC_biexp_segmented.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,4 +86,4 @@ def ivim_fit(self, signals, bvalues, **kwargs):
results["f"] = fit_results[1]
results["Dp"] = fit_results[2]

return results
return results
2 changes: 2 additions & 0 deletions src/standardized/PvH_KB_NKI_IVIMfit.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
from src.wrappers.OsipiBase import OsipiBase
from src.original.PvH_KB_NKI.DWI_functions_standalone import generate_IVIMmaps_standalone, generate_ADC_standalone
import numpy as np
import warnings
warnings.simplefilter('once', UserWarning)

class PvH_KB_NKI_IVIMfit(OsipiBase):
"""
Expand Down
4 changes: 4 additions & 0 deletions src/standardized/Super_IVIM_DC.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,10 @@ def initialize(self, bounds, initial_guess, fitS0, SNR, working_dir=os.getcwd(),
self.use_bounds = False
self.deep_learning = True
self.supervised = True

# Additional options
self.stochastic = True

modeldir = Path(working_dir) # Ensure it's a Path object
modeldir = modeldir / "models"
modeldir.mkdir(parents=True, exist_ok=True)
Expand Down
Loading
Loading