import numpy as np

from .core.base import ProcessPipeline, ProcessResult, registerPipeline, with_attrs


@registerPipeline(name="segmentformshape")
class ArterialSegExample(ProcessPipeline):
    """
    Per-segment waveform-shape metrics on raw and band-limited velocity signals.

    Inputs (HDF5 paths, see class attributes):
      - ``v_raw`` / ``v``: 4-D velocity datasets laid out (time, beat, branch, radius)
      - ``T``: per-beat period in seconds, indexed as T[0][beat]

    For every (beat, branch) the radius axis is averaged (nanmean) into a single
    waveform, from which three shape metrics are derived:
      - centroid (TMI): first-moment time as a fraction of the beat period
      - RI: resistive index, 1 - vmin/vmax
      - RVTI: ratio of the first half-integral to the second half-integral
    """

    description = "Per-beat, per-branch waveform shape metrics (centroid, RI, RVTI)."
    v_raw = "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegment/value"
    v = "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegmentBandLimited/value"
    T = "/Artery/VelocityPerBeat/beatPeriodSeconds/value"

    @staticmethod
    def _shape_metrics(v_ds, t_ds):
        """Return (TMI, RI, RVTI) as (n_beats, n_branches) nested lists.

        v_ds: array shaped (n_t, n_beats, n_branches, n_radii); t_ds: T[0][beat].

        Fixes vs. the previous version:
          * the moment accumulators are reset for every waveform (they used to
            accumulate across branches AND beats, so every centroid after the
            first was contaminated by earlier waveforms);
          * the sample spacing is derived from the actual waveform length
            instead of the hard-coded 64;
          * the centroid is normalized by the current beat's period rather than
            always the first beat's (t_ds[0][0]).
        """
        eps = 1e-12
        TMI, RI, RVTI = [], [], []
        n_beats = v_ds.shape[1]
        n_branches = v_ds.shape[2]
        for beat in range(n_beats):
            t_beat = t_ds[0][beat]
            tmi_row, ri_row, rvti_row = [], [], []
            for branch in range(n_branches):
                # Average over the radius axis -> one waveform per (beat, branch).
                speed = np.nanmean(v_ds[:, beat, branch, :], axis=1)
                n = len(speed)
                dt = t_beat / n  # sample spacing inferred from the data

                # Centroid as a fraction of this beat's period (dimensionless).
                m0 = np.sum(speed)
                m1 = np.sum(speed * np.arange(n) * dt)
                tmi_row.append(m1 / (m0 * t_beat + eps))

                # Resistive index.
                ri_row.append(1 - (np.min(speed) / np.max(speed)))

                # Ratio of first-half to second-half velocity-time integral.
                half = n // 2
                rvti_row.append(np.sum(speed[:half]) / (np.sum(speed[half:]) + eps))
            TMI.append(tmi_row)
            RI.append(ri_row)
            RVTI.append(rvti_row)
        return TMI, RI, RVTI

    def run(self, h5file) -> ProcessResult:
        """Compute the metrics for both band-limited and raw segment signals."""
        vraw_ds = np.asarray(h5file[self.v_raw])
        v_ds = np.asarray(h5file[self.v])
        t_ds = np.asarray(h5file[self.T])

        TMI_seg, RI_seg, RVTI_seg = self._shape_metrics(v_ds, t_ds)
        TMIraw_seg, RIraw_seg, RVTIraw_seg = self._shape_metrics(vraw_ds, t_ds)

        # Each key becomes a dataset under /pipelines/<name>/metrics.
        # NOTE(review): the historical "RTVI" spelling is kept on purpose so
        # downstream consumers of the dataset names keep working.
        metrics = {
            "centroid": with_attrs(np.asarray(TMI_seg), {"unit": [""]}),
            "RI": with_attrs(np.asarray(RI_seg), {"unit": [""]}),
            "RTVI": with_attrs(np.asarray(RVTI_seg), {"unit": [""]}),
            "centroid raw": with_attrs(np.asarray(TMIraw_seg), {"unit": [""]}),
            "RI raw": with_attrs(np.asarray(RIraw_seg), {"unit": [""]}),
            "RTVI raw": with_attrs(np.asarray(RVTIraw_seg), {"unit": [""]}),
        }

        return ProcessResult(metrics=metrics)
- v_raw_input = "/Artery/VelocityPerBeat/VelocitySignalPerBeat/value" - v_bandlimited_input = ( - "/Artery/VelocityPerBeat/VelocitySignalPerBeatBandLimited/value" - ) - T = "/Artery/VelocityPerBeat/beatPeriodSeconds/value" - v_bandlimited_max_input = "/Artery/VelocityPerBeat/VmaxPerBeatBandLimited/value" - v_bandlimited_min_input = "/Artery/VelocityPerBeat/VminPerBeatBandLimited/value" - - def run(self, h5file) -> ProcessResult: - v_raw = np.asarray(h5file[self.v_raw_input]) - v_raw = np.maximum(v_raw, 0) - v_bandlimited = np.asarray(h5file[self.v_bandlimited_input]) - v_bandlimited = np.maximum(v_bandlimited, 0) - T_ds = np.asarray(h5file[self.T]) - v_bandlimited_max = np.asarray(h5file[self.v_bandlimited_max_input]) - v_bandlimited_max = np.maximum(v_bandlimited_max, 0) - v_bandlimited_min = np.asarray(h5file[self.v_bandlimited_min_input]) - v_bandlimited_min = np.maximum(v_bandlimited_min, 0) - tau_M1_raw = [] - tau_M1_over_T_raw = [] - tau_M1_bandlimited = [] - tau_M1_over_T_bandlimited = [] - - R_VTI_bandlimited = [] - R_VTI_raw = [] - - RI_bandlimited = [] - RI_raw = [] - - ratio_systole_diastole_R_VTI = 0.5 - - for beat_idx in range(len(T_ds[0])): - t = T_ds[0][beat_idx] / len(v_raw.T[beat_idx]) - D1_raw = np.sum( - v_raw.T[beat_idx][ - : int(np.ceil(len(v_raw.T[0]) * ratio_systole_diastole_R_VTI)) - ] - ) - D2_raw = np.sum( - v_raw.T[beat_idx][ - int(np.ceil(len(v_raw.T[0]) * ratio_systole_diastole_R_VTI)) : - ] - ) - D1_bandlimited = np.sum( - v_bandlimited.T[beat_idx][ - : int( - np.ceil(len(v_bandlimited.T[0]) * ratio_systole_diastole_R_VTI) - ) - ] - ) - D2_bandlimited = np.sum( - v_bandlimited.T[beat_idx][ - int( - np.ceil(len(v_bandlimited.T[0]) * ratio_systole_diastole_R_VTI) - ) : - ] - ) - R_VTI_bandlimited.append(D2_bandlimited / (D1_bandlimited + 10 ** (-12))) - R_VTI_raw.append(D2_raw / (D1_raw + 10 ** (-12))) - M_0 = np.sum(v_raw.T[beat_idx]) - M_1 = 0 - for time_idx in range(len(v_raw.T[beat_idx])): - M_1 += v_raw[time_idx][beat_idx] * 
time_idx * t - TM1 = M_1 / M_0 - tau_M1_raw.append(TM1) - tau_M1_over_T_raw.append(TM1 / T_ds[0][beat_idx]) - - for beat_idx in range(len(T_ds[0])): - t = T_ds[0][beat_idx] / len(v_raw.T[beat_idx]) - M_0 = np.sum(v_bandlimited.T[beat_idx]) - M_1 = 0 - for time_idx in range(len(v_raw.T[beat_idx])): - M_1 += v_bandlimited[time_idx][beat_idx] * time_idx * t - TM1 = M_1 / M_0 - tau_M1_bandlimited.append(TM1) - tau_M1_over_T_bandlimited.append(TM1 / T_ds[0][beat_idx]) - - for beat_idx in range(len(v_bandlimited_max[0])): - RI_bandlimited_temp = 1 - ( - v_bandlimited_min[0][beat_idx] / v_bandlimited_max[0][beat_idx] - ) - RI_bandlimited.append(RI_bandlimited_temp) - - for beat_idx in range(len(v_bandlimited_max[0])): - RI_raw_temp = 1 - (np.min(v_raw.T[beat_idx]) / np.max(v_raw.T[beat_idx])) - RI_raw.append(RI_raw_temp) - - # Metrics are the main numerical outputs; each key becomes a dataset under /pipelines//metrics. - metrics = { - "tau_M1_raw": with_attrs(np.asarray(tau_M1_raw), {"unit": [""]}), - "tau_M1_bandlimited": np.asarray(tau_M1_bandlimited), - "tau_M1_over_T_raw": with_attrs( - np.asarray(tau_M1_over_T_raw), {"unit": [""]} - ), - "tau_M1_over_T_bandlimited": np.asarray(tau_M1_over_T_bandlimited), - "RI_bandlimited": np.asarray(RI_bandlimited), - "RI_raw": np.asarray(RI_raw), - "R_VTI_bandlimited": np.asarray(R_VTI_bandlimited), - "R_VTI_raw": np.asarray(R_VTI_raw), - "ratio_systole_diastole_R_VTI": np.asarray(ratio_systole_diastole_R_VTI), - } - - # Artifacts can store non-metric outputs (strings, paths, etc.). 
import numpy as np

from .core.base import ProcessPipeline, ProcessResult, registerPipeline, with_attrs


@registerPipeline(name="arterial_waveform_shape_metrics")
class ArterialSegExample(ProcessPipeline):
    """
    Waveform-shape metrics on per-beat, per-branch, per-radius velocity waveforms.

    Expected segment layout:
        v_seg[t, beat, branch, radius]
        i.e. v_seg shape: (n_t, n_beats, n_branches, n_radii)

    Outputs
    -------
    A) Per-segment (flattened branch x radius):
        by_segment/*_segment : shape (n_beats, n_segments)
        n_segments = n_branches * n_radii
        seg_idx = branch_idx * n_radii + radius_idx (branch-major)

    B) Aggregated:
        by_segment/*_branch : shape (n_beats, n_branches)  (median over radii)
        by_segment/*_global : shape (n_beats,)             (mean over all branches & radii)

    C) Independent global metrics (from the global waveform datasets):
        global/* : shape (n_beats,)

    Definitions (gain-invariant / shape metrics)
    --------------------------------------------
    Rectification:    v <- max(v, 0) with NaNs preserved.

    tau_M1          = M1 / M0
    tau_M1_over_T   = (M1 / M0) / T
    RI              = 1 - vmin/vmax                      (robust)
    PI              = (vmax - vmin) / mean(v)            (robust)
    RVTI (paper)    = D1 / (D2 + eps), split at T/2      (ratio_rvti = 0.5)
    SF_VTI          = D1_1/3 / (D1_1/3 + D2_2/3 + eps)   (systolic fraction)
    tau_M2          = sqrt(mu2 / M0), mu2 = sum(v * (t - tau_M1)^2)
    t{10,25,50,75,90}_over_T : quantile times of the cumulative integral
        C(t) = cumsum(v) / sum(v)
    E_low_over_E_total, E_high_over_E_total : FFT band-energy ratios with
        harmonic index h = f * T (cycles/beat); low band 1..H_LOW_MAX,
        high band H_HIGH_MIN..H_HIGH_MAX.
    """

    description = "Waveform shape metrics (segment + aggregates + global), gain-invariant and robust."

    # Segment inputs
    v_raw_segment_input = (
        "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegment/value"
    )
    v_band_segment_input = "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegmentBandLimited/value"

    # Global inputs
    v_raw_global_input = "/Artery/VelocityPerBeat/VelocitySignalPerBeat/value"
    v_band_global_input = (
        "/Artery/VelocityPerBeat/VelocitySignalPerBeatBandLimited/value"
    )

    # Beat period
    T_input = "/Artery/VelocityPerBeat/beatPeriodSeconds/value"

    # Parameters
    eps = 1e-12
    ratio_rvti = 0.5  # split for RVTI
    ratio_sf_vti = 1.0 / 3.0  # split for SF_VTI

    # Spectral bands (harmonic indices, inclusive)
    H_LOW_MAX = 3
    H_HIGH_MIN = 4
    H_HIGH_MAX = 8

    # -------------------------
    # Helpers
    # -------------------------
    @staticmethod
    def _rectify_keep_nan(x: np.ndarray) -> np.ndarray:
        """Clamp negatives to 0 while leaving NaNs in place."""
        x = np.asarray(x, dtype=float)
        return np.where(np.isfinite(x), np.maximum(x, 0.0), np.nan)

    @staticmethod
    def _safe_nanmean(x: np.ndarray) -> float:
        """nanmean that returns NaN (instead of warning) on empty/all-NaN input."""
        if x.size == 0 or not np.any(np.isfinite(x)):
            return np.nan
        return float(np.nanmean(x))

    @staticmethod
    def _safe_nanmedian(x: np.ndarray) -> float:
        """nanmedian that returns NaN (instead of warning) on empty/all-NaN input."""
        if x.size == 0 or not np.any(np.isfinite(x)):
            return np.nan
        return float(np.nanmedian(x))

    @staticmethod
    def _ensure_time_by_beat(v2: np.ndarray, n_beats: int) -> np.ndarray:
        """
        Ensure v2 is shaped (n_t, n_beats). If it is (n_beats, n_t), transpose.
        Falls back to assuming (n_t, n_beats) when the shape is ambiguous.
        """
        v2 = np.asarray(v2, dtype=float)
        if v2.ndim != 2:
            raise ValueError(f"Expected 2D global waveform, got shape {v2.shape}")

        if v2.shape[1] == n_beats:
            return v2
        if v2.shape[0] == n_beats and v2.shape[1] != n_beats:
            return v2.T

        # Fallback: if ambiguous, assume (n_t, n_beats)
        return v2

    def _quantile_time_over_T(self, v: np.ndarray, Tbeat: float, q: float) -> float:
        """
        v: rectified 1D waveform (NaNs allowed).
        Returns t_q / Tbeat where C(t_q) >= q, with C = cumsum(v)/sum(v).
        NaN when the period or the waveform is unusable.
        """
        if (not np.isfinite(Tbeat)) or Tbeat <= 0:
            return np.nan
        if v.size == 0 or not np.any(np.isfinite(v)):
            return np.nan

        vv = np.where(np.isfinite(v), v, 0.0)
        m0 = float(np.sum(vv))
        if m0 <= 0:
            return np.nan

        c = np.cumsum(vv) / m0
        idx = int(np.searchsorted(c, q, side="left"))
        idx = max(0, min(v.size - 1, idx))

        dt = Tbeat / v.size
        return float(idx * dt / Tbeat)

    def _spectral_ratios(self, v: np.ndarray, Tbeat: float) -> tuple[float, float]:
        """
        Return (E_low/E_total, E_high/E_total) using harmonic-index bands.

        The total energy includes every bin (DC included); the low/high bands
        exclude DC by construction (harmonic index >= 1).
        """
        if (not np.isfinite(Tbeat)) or Tbeat <= 0:
            return np.nan, np.nan
        if v.size == 0 or not np.any(np.isfinite(v)):
            return np.nan, np.nan

        vv = np.where(np.isfinite(v), v, 0.0)
        n = vv.size
        if n < 2:
            return np.nan, np.nan

        fs = n / Tbeat  # Hz
        P = np.abs(np.fft.rfft(vv)) ** 2
        f = np.fft.rfftfreq(n, d=1.0 / fs)  # Hz
        h = f * Tbeat  # harmonic index (cycles per beat, continuous)

        E_total = float(np.sum(P))
        if not np.isfinite(E_total) or E_total <= 0:
            # BUG FIX: this branch used to return a 3-tuple, which would have
            # crashed the 2-value unpack in _compute_metrics_1d.
            return np.nan, np.nan

        low_mask = (h >= 1.0) & (h <= float(self.H_LOW_MAX))
        high_mask = (h >= float(self.H_HIGH_MIN)) & (h <= float(self.H_HIGH_MAX))

        E_low = float(np.sum(P[low_mask]))
        E_high = float(np.sum(P[high_mask]))
        return float(E_low / E_total), float(E_high / E_total)

    def _compute_metrics_1d(self, v: np.ndarray, Tbeat: float) -> dict:
        """
        Canonical metric kernel: all waveform-shape metrics from one 1D waveform.
        Returns a dict of floats, NaN-filled when the waveform or period is
        unusable (empty, non-positive period, non-positive integral).
        """
        v = self._rectify_keep_nan(v)
        n = int(v.size)
        if n <= 0:
            return {k: np.nan for k in self._metric_keys()}
        if (not np.isfinite(Tbeat)) or Tbeat <= 0:
            return {k: np.nan for k in self._metric_keys()}

        vv = v  # rectified, NaNs preserved
        m0 = float(np.nansum(vv))
        if m0 <= 0:
            return {k: np.nan for k in self._metric_keys()}

        dt = Tbeat / n
        t = np.arange(n, dtype=float) * dt

        # First moment
        m1 = float(np.nansum(vv * t))
        tau_M1 = m1 / m0
        tau_M1_over_T = tau_M1 / Tbeat

        # RI / PI, robust against NaNs and guarded against non-positive vmax/mean
        vmax = float(np.nanmax(vv))
        vmin = float(np.nanmin(vv))
        meanv = self._safe_nanmean(vv)

        if vmax <= 0:
            RI = np.nan
            PI = np.nan
        else:
            RI = 1.0 - (vmin / vmax)
            RI = float(np.clip(RI, 0.0, 1.0)) if np.isfinite(RI) else np.nan

            if (not np.isfinite(meanv)) or meanv <= 0:
                PI = np.nan
            else:
                PI = (vmax - vmin) / meanv
                PI = float(PI) if np.isfinite(PI) else np.nan

        # RVTI (paper convention, split at ratio_rvti of the beat).
        # BUG FIX: use nansum (as SF_VTI already did) so a single NaN sample no
        # longer poisons D1/D2 through np.sum.
        k_rvti = int(np.ceil(n * self.ratio_rvti))
        k_rvti = max(0, min(n, k_rvti))
        D1_rvti = float(np.nansum(vv[:k_rvti])) if k_rvti > 0 else np.nan
        D2_rvti = float(np.nansum(vv[k_rvti:])) if k_rvti < n else np.nan
        RVTI = D1_rvti / (D2_rvti + self.eps)

        # SF_VTI (first 1/3 of samples vs the whole integral)
        k_sf = int(np.ceil(n * self.ratio_sf_vti))
        k_sf = max(0, min(n, k_sf))
        D1_sf = float(np.nansum(vv[:k_sf])) if k_sf > 0 else np.nan
        D2_sf = float(np.nansum(vv[k_sf:])) if k_sf < n else np.nan
        SF_VTI = D1_sf / (D1_sf + D2_sf + self.eps)

        # Second central moment around tau_M1 (temporal spread of the waveform)
        dtau = t - tau_M1
        mu2 = float(np.nansum(vv * (dtau**2)))
        tau_M2 = np.sqrt(mu2 / m0 + self.eps)
        tau_M2_over_T = tau_M2 / Tbeat

        # Quantile timing features (on the cumulative integral)
        t10_over_T = self._quantile_time_over_T(vv, Tbeat, 0.10)
        t25_over_T = self._quantile_time_over_T(vv, Tbeat, 0.25)
        t50_over_T = self._quantile_time_over_T(vv, Tbeat, 0.50)
        t75_over_T = self._quantile_time_over_T(vv, Tbeat, 0.75)
        t90_over_T = self._quantile_time_over_T(vv, Tbeat, 0.90)

        # Spectral band ratios
        E_low_over_E_total, E_high_over_E_total = self._spectral_ratios(vv, Tbeat)

        return {
            "tau_M1": float(tau_M1),
            "tau_M1_over_T": float(tau_M1_over_T),
            "RI": float(RI) if np.isfinite(RI) else np.nan,
            "PI": float(PI) if np.isfinite(PI) else np.nan,
            "R_VTI": float(RVTI),
            "SF_VTI": float(SF_VTI),
            "tau_M2_over_T": float(tau_M2_over_T),
            "tau_M2": float(tau_M2),
            "t10_over_T": float(t10_over_T),
            "t25_over_T": float(t25_over_T),
            "t50_over_T": float(t50_over_T),
            "t75_over_T": float(t75_over_T),
            "t90_over_T": float(t90_over_T),
            "E_low_over_E_total": float(E_low_over_E_total),
            "E_high_over_E_total": float(E_high_over_E_total),
        }

    @staticmethod
    def _metric_keys() -> list[str]:
        """Canonical ordering of the metric names produced by _compute_metrics_1d."""
        return [
            "tau_M1",
            "tau_M1_over_T",
            "RI",
            "PI",
            "R_VTI",
            "SF_VTI",
            "tau_M2_over_T",
            "tau_M2",
            "t10_over_T",
            "t25_over_T",
            "t50_over_T",
            "t75_over_T",
            "t90_over_T",
            "E_low_over_E_total",
            "E_high_over_E_total",
        ]

    def _compute_block_segment(self, v_block: np.ndarray, T: np.ndarray):
        """
        v_block: (n_t, n_beats, n_branches, n_radii)
        Returns (seg, br, gl, n_branches, n_radii, seg_order_note) where the
        three dicts map metric key -> arrays shaped:
            seg: (n_beats, n_segments)   per segment
            br : (n_beats, n_branches)   nan-median over radii
            gl : (n_beats,)              nan-mean over all branches & radii
        """
        if v_block.ndim != 4:
            raise ValueError(
                f"Expected (n_t,n_beats,n_branches,n_radii), got {v_block.shape}"
            )

        n_t, n_beats, n_branches, n_radii = v_block.shape
        n_segments = n_branches * n_radii

        keys = self._metric_keys()
        seg = {k: np.full((n_beats, n_segments), np.nan, dtype=float) for k in keys}
        br = {k: np.full((n_beats, n_branches), np.nan, dtype=float) for k in keys}
        gl = {k: np.full((n_beats,), np.nan, dtype=float) for k in keys}

        for beat_idx in range(n_beats):
            Tbeat = float(T[0][beat_idx])

            # Accumulators for the global (all branches & radii) aggregate.
            gl_vals = {k: [] for k in keys}

            for branch_idx in range(n_branches):
                # Accumulators for the per-branch (over radii) aggregate.
                br_vals = {k: [] for k in keys}

                for radius_idx in range(n_radii):
                    m = self._compute_metrics_1d(
                        v_block[:, beat_idx, branch_idx, radius_idx], Tbeat
                    )
                    seg_idx = branch_idx * n_radii + radius_idx
                    for k in keys:
                        seg[k][beat_idx, seg_idx] = m[k]
                        br_vals[k].append(m[k])
                        gl_vals[k].append(m[k])

                # Branch aggregates: nan-median over radii
                for k in keys:
                    br[k][beat_idx, branch_idx] = self._safe_nanmedian(
                        np.asarray(br_vals[k], dtype=float)
                    )

            # Global aggregates: nan-mean over all branches & radii
            for k in keys:
                gl[k][beat_idx] = self._safe_nanmean(
                    np.asarray(gl_vals[k], dtype=float)
                )

        seg_order_note = (
            "seg_idx = branch_idx * n_radii + radius_idx (branch-major flattening)"
        )
        return seg, br, gl, n_branches, n_radii, seg_order_note

    def _compute_block_global(self, v_global: np.ndarray, T: np.ndarray):
        """
        v_global: 2D global waveform, reoriented to (n_t, n_beats).
        Returns dict of arrays each shaped (n_beats,).
        """
        n_beats = int(T.shape[1])
        v_global = self._ensure_time_by_beat(v_global, n_beats)
        v_global = self._rectify_keep_nan(v_global)

        out = {k: np.full((n_beats,), np.nan, dtype=float) for k in self._metric_keys()}

        for beat_idx in range(n_beats):
            Tbeat = float(T[0][beat_idx])
            m = self._compute_metrics_1d(v_global[:, beat_idx], Tbeat)
            for k in self._metric_keys():
                out[k][beat_idx] = m[k]

        return out

    # -------------------------
    # Pipeline entrypoint
    # -------------------------
    def run(self, h5file) -> ProcessResult:
        """Compute segment metrics (if segment datasets exist) and global metrics
        (if global datasets exist); each family also records its parameters."""
        T = np.asarray(h5file[self.T_input])
        metrics = {}

        # -------------------------
        # Segment metrics (raw + bandlimited)
        # -------------------------
        have_seg = (self.v_raw_segment_input in h5file) and (
            self.v_band_segment_input in h5file
        )
        if have_seg:
            v_raw_seg = np.asarray(h5file[self.v_raw_segment_input])
            v_band_seg = np.asarray(h5file[self.v_band_segment_input])

            seg_b, br_b, gl_b, nb_b, nr_b, seg_note_b = self._compute_block_segment(
                v_band_seg, T
            )
            seg_r, br_r, gl_r, nb_r, nr_r, _ = self._compute_block_segment(
                v_raw_seg, T
            )

            seg_note = seg_note_b
            if (nb_b != nb_r) or (nr_b != nr_r):
                seg_note = (
                    seg_note_b + " | WARNING: raw/band branch/radius dims differ."
                )

            # Helper to pack a dict-of-arrays into namespaced HDF5 metric keys.
            def pack(prefix: str, d: dict, attrs_common: dict):
                for k, arr in d.items():
                    metrics[f"{prefix}/{k}"] = with_attrs(arr, attrs_common)

            # Per-segment outputs. The dicts already hold exactly the metric
            # keys in _metric_keys() order, so they are packed directly instead
            # of being re-spelled key by key (same dataset names as before).
            pack(
                "by_segment/bandlimited_segment",
                seg_b,
                {"segment_indexing": [seg_note]},
            )
            pack(
                "by_segment/raw_segment",
                seg_r,
                {"segment_indexing": [seg_note]},
            )

            # Branch aggregates (median over radii)
            pack(
                "by_segment/bandlimited_branch",
                br_b,
                {"definition": ["median over radii per branch"]},
            )
            pack(
                "by_segment/raw_branch",
                br_r,
                {"definition": ["median over radii per branch"]},
            )

            # Global aggregates (mean over all branches & radii)
            pack(
                "by_segment/bandlimited_global",
                gl_b,
                {"definition": ["mean over branches and radii"]},
            )
            pack(
                "by_segment/raw_global",
                gl_r,
                {"definition": ["mean over branches and radii"]},
            )

            # Store parameters used (for provenance)
            metrics["by_segment/params/ratio_rvti"] = np.asarray(
                self.ratio_rvti, dtype=float
            )
            metrics["by_segment/params/ratio_sf_vti"] = np.asarray(
                self.ratio_sf_vti, dtype=float
            )
            metrics["by_segment/params/eps"] = np.asarray(self.eps, dtype=float)
            metrics["by_segment/params/H_LOW_MAX"] = np.asarray(
                self.H_LOW_MAX, dtype=int
            )
            metrics["by_segment/params/H_HIGH_MIN"] = np.asarray(
                self.H_HIGH_MIN, dtype=int
            )
            metrics["by_segment/params/H_HIGH_MAX"] = np.asarray(
                self.H_HIGH_MAX, dtype=int
            )

        # -------------------------
        # Independent global metrics (raw + bandlimited)
        # -------------------------
        have_glob = (self.v_raw_global_input in h5file) and (
            self.v_band_global_input in h5file
        )
        if have_glob:
            out_raw = self._compute_block_global(
                np.asarray(h5file[self.v_raw_global_input]), T
            )
            out_band = self._compute_block_global(
                np.asarray(h5file[self.v_band_global_input]), T
            )

            for k in self._metric_keys():
                metrics[f"global/raw/{k}"] = out_raw[k]
                metrics[f"global/bandlimited/{k}"] = out_band[k]

            # provenance
            metrics["global/params/ratio_rvti"] = np.asarray(
                self.ratio_rvti, dtype=float
            )
            metrics["global/params/ratio_sf_vti"] = np.asarray(
                self.ratio_sf_vti, dtype=float
            )
            metrics["global/params/eps"] = np.asarray(self.eps, dtype=float)
            metrics["global/params/H_LOW_MAX"] = np.asarray(self.H_LOW_MAX, dtype=int)
            metrics["global/params/H_HIGH_MIN"] = np.asarray(self.H_HIGH_MIN, dtype=int)
            metrics["global/params/H_HIGH_MAX"] = np.asarray(self.H_HIGH_MAX, dtype=int)

        return ProcessResult(metrics=metrics)
import numpy as np

from .core.base import ProcessPipeline, ProcessResult, registerPipeline, with_attrs


@registerPipeline(name="modal_analysis")
class ArterialExample(ProcessPipeline):
    """
    Modal (SVD) analysis of moment maps.

    Reads the zeroth and second moment image stacks (shape (n_t, 1, x, y)),
    builds (n_pixels, n_time) snapshot matrices for M0 and sqrt(M2/M0), and
    extracts the leading singular modes of each via sparse SVD. Spatial modes
    are reshaped back to (x, y).
    """

    description = "SVD modal decomposition of M0 and sqrt(M2/M0) moment maps."
    M0_input = "/moment0"
    M1_input = "/moment1"
    M2_input = "/moment2"
    registration_input = "/registration"

    @staticmethod
    def _leading_modes(matrix, n_modes, x_size, y_size):
        """Top singular triplets of `matrix` (n_pixels, n_time), sorted by
        decreasing singular value, plus spatial modes reshaped to (x, y)."""
        from scipy.sparse.linalg import svds

        # svds requires 1 <= k < min(shape); clamp so small inputs do not crash.
        k = min(n_modes, min(matrix.shape) - 1)
        U, S, Vt = svds(matrix, k=k)

        # svds returns singular values in ascending order; sort descending.
        order = np.argsort(S)[::-1]
        U, S, Vt = U[:, order], S[order], Vt[order, :]

        spatial = [U[:, mode].reshape(x_size, y_size) for mode in range(U.shape[1])]
        return U, S, Vt, spatial

    def run(self, h5file) -> ProcessResult:
        """Compute the modal decompositions and return them as metrics."""
        moment_0 = np.asarray(h5file[self.M0_input])
        moment_2 = np.asarray(h5file[self.M2_input])

        n_t = moment_0.shape[0]
        x_size = moment_0.shape[2]
        y_size = moment_0.shape[3]

        # Snapshot matrices, shape (n_pixels, n_time); pixels flattened
        # x-major (C order) so they match the (x_size, y_size) reshape of the
        # spatial modes. Replaces the previous O(n_t * x * y) Python loops.
        # NOTE(review): moment_1 was read and accumulated by the old code but
        # never used afterwards, so it is no longer loaded.
        M0_matrix = moment_0[:, 0, :, :].reshape(n_t, -1).T
        M2_matrix = moment_2[:, 0, :, :].reshape(n_t, -1).T
        M2_over_M0_squared = np.sqrt(M2_matrix / M0_matrix)

        n_modes = 20
        U_0, S_0, Vt_0, spatial_modes = self._leading_modes(
            M0_matrix, n_modes, x_size, y_size
        )
        (
            U_M2_over_M0_squared,
            S_M2_over_M0_squared,
            Vt_M2_over_M0_squared,
            spatial_modes_M2_over_M0_squared,
        ) = self._leading_modes(M2_over_M0_squared, n_modes, x_size, y_size)

        # Metrics are the main numerical outputs; each key becomes a dataset
        # under /pipelines/<name>/metrics (dataset names unchanged).
        metrics = {
            "Vt_moment0": with_attrs(np.asarray(Vt_0), {"unit": [""]}),
            "spatial_modes_moment0": with_attrs(
                np.asarray(spatial_modes), {"unit": [""]}
            ),
            "S_moment0": with_attrs(np.asarray(S_0), {"unit": [""]}),
            "U_moment0": with_attrs(np.asarray(U_0), {"unit": [""]}),
            "Vt_M2_over_M0_squared": with_attrs(
                np.asarray(Vt_M2_over_M0_squared), {"unit": [""]}
            ),
            "spatial_modes_M2_over_M0_squared": with_attrs(
                np.asarray(spatial_modes_M2_over_M0_squared), {"unit": [""]}
            ),
            "S_M2_over_M0_squared": with_attrs(
                np.asarray(S_M2_over_M0_squared), {"unit": [""]}
            ),
            "U_M2_over_M0_squared": with_attrs(
                np.asarray(U_M2_over_M0_squared), {"unit": [""]}
            ),
        }

        return ProcessResult(metrics=metrics)
v_block shape: (n_t, n_beats, n_branches, n_radii) + + Outputs + ------- + A) Per-segment (flattened branch×radius): + *_segment : shape (n_beats, n_segments) + where n_segments = n_branches * n_radii and + seg_idx = branch_idx * n_radii + radius_idx (branch-major) + + B) Aggregated: + *_branch : shape (n_beats, n_branches) (median over radii) + *_global : shape (n_beats,) (mean over all branches & radii) + + Metric definitions + ------------------ + - Rectification: v <- max(v, 0) (NaNs preserved) + - tau_M1: first moment time / zeroth moment on rectified waveform + tau_M1 = M1/M0, M0 = sum(v), M1 = sum(v * t_k), t_k = k * (Tbeat/n_t) + - tau_M1_over_T: (tau_M1 / Tbeat) + - RI (robust): RI = 1 - vmin/vmax with guards for vmax<=0 or all-NaN + - R_VTI_*: kept dataset name for compatibility, but uses PAPER convention: + RVTI = D1 / (D2 + eps) + D1 = sum(v[0:k]), D2 = sum(v[k:n_t]), k = ceil(n_t * ratio), ratio=0.5 + """ + + description = ( + "Segment waveform shape metrics (tau, RI, RVTI) + branch/global aggregates." 
+ ) + + v_raw_global_input = "/Artery/VelocityPerBeat/VelocitySignalPerBeat/value" + v_bandlimited_global_input = ( + "/Artery/VelocityPerBeat/VelocitySignalPerBeatBandLimited/value" + ) + v_bandlimited_global_max_input = ( + "/Artery/VelocityPerBeat/VmaxPerBeatBandLimited/value" + ) + v_bandlimited_global_min_input = ( + "/Artery/VelocityPerBeat/VminPerBeatBandLimited/value" + ) + T_input = "/Artery/VelocityPerBeat/beatPeriodSeconds/value" + + @staticmethod + def _rectify_keep_nan(x: np.ndarray) -> np.ndarray: + x = np.asarray(x, dtype=float) + return np.where(np.isfinite(x), np.maximum(x, 0.0), np.nan) + + @staticmethod + def _safe_nanmean(x: np.ndarray) -> float: + if x.size == 0 or not np.any(np.isfinite(x)): + return np.nan + return float(np.nanmean(x)) + + @staticmethod + def _safe_nanmedian(x: np.ndarray) -> float: + if x.size == 0 or not np.any(np.isfinite(x)): + return np.nan + return float(np.nanmedian(x)) + + @staticmethod + def _metrics_from_waveform( + v: np.ndarray, + Tbeat: float, + ratio: float = 0.5, + eps: float = 1e-12, + ): + v = ArterialSegExample._rectify_keep_nan(v) + + n = int(v.size) + if n <= 0: + return 0.0, 0.0, 0.0, 0.0 + + # tau_M1 and tau_M1/T (PER WAVEFORM ONLY) + if (not np.isfinite(Tbeat)) or Tbeat <= 0: + tau_M1 = np.nan + tau_M1_over_T = np.nan + else: + m0 = np.nansum(v) + if (not np.isfinite(m0)) or m0 <= 0: + tau_M1 = np.nan + tau_M1_over_T = np.nan + else: + dt = Tbeat / n + t = np.arange(n, dtype=float) * dt + m1 = np.nansum(v * t) + tau_M1 = (m1 / m0) if np.isfinite(m1) else np.nan + tau_M1_over_T = tau_M1 / Tbeat + + # RI robust + if not np.any(np.isfinite(v)): + RI = np.nan + PI = np.nan + else: + vmax = np.nanmax(v) + mean = np.nanmean(v) + if (not np.isfinite(vmax)) or vmax <= 0: + RI = np.nan + PI = np.nan + else: + vmin = np.nanmin(v) + RI = 1.0 - (vmin / vmax) + PI = (vmax - vmin) / mean + if not np.isfinite(RI): + RI = np.nan + PI = np.nan + else: + RI = float(np.clip(RI, 0.0, 1.0)) + PI = float(PI) + # RVTI 
(paper): D1/(D2+eps) + k = int(np.ceil(n * ratio)) + k = max(0, min(n, k)) + D1 = np.nansum(v[:k]) if k > 0 else np.nan + D2 = np.nansum(v[k:]) if k < n else np.nan + if not np.isfinite(D1) or D1 == 0.0: + D1 = np.nan + if not np.isfinite(D2) or D2 == 0.0: + D2 = np.nan + RVTI = float(D1 / (D2 + eps)) + + return float(tau_M1), float(tau_M1_over_T), float(RI), RVTI, float(PI) + + def _compute_block(self, v_block: np.ndarray, T: np.ndarray, ratio: float): + if v_block.ndim != 4: + raise ValueError( + f"Expected (n_t,n_beats,n_branches,n_radii), got {v_block.shape}" + ) + + n_t, n_beats, n_branches, n_radii = v_block.shape + n_segments = n_branches * n_radii + + # Per-segment flattened (beat, segment) + tau_seg = np.zeros((n_beats, n_segments), dtype=float) + tauT_seg = np.zeros((n_beats, n_segments), dtype=float) + RI_seg = np.zeros((n_beats, n_segments), dtype=float) + PI_seg = np.zeros((n_beats, n_segments), dtype=float) + RVTI_seg = np.zeros((n_beats, n_segments), dtype=float) + + # Aggregated + tau_branch = np.zeros((n_beats, n_branches), dtype=float) + tauT_branch = np.zeros((n_beats, n_branches), dtype=float) + RI_branch = np.zeros((n_beats, n_branches), dtype=float) + PI_branch = np.zeros((n_beats, n_branches), dtype=float) + RVTI_branch = np.zeros((n_beats, n_branches), dtype=float) + + tau_global = np.zeros((n_beats,), dtype=float) + tauT_global = np.zeros((n_beats,), dtype=float) + RI_global = np.zeros((n_beats,), dtype=float) + PI_global = np.zeros((n_beats,), dtype=float) + RVTI_global = np.zeros((n_beats,), dtype=float) + + for beat_idx in range(n_beats): + Tbeat = float(T[0][beat_idx]) + + # Global accumulators for this beat + tau_vals = [] + tauT_vals = [] + RI_vals = [] + PI_vals = [] + RVTI_vals = [] + + for branch_idx in range(n_branches): + # Branch accumulators across radii + tau_b = [] + tauT_b = [] + RI_b = [] + PI_b = [] + RVTI_b = [] + + for radius_idx in range(n_radii): + v = v_block[:, beat_idx, branch_idx, radius_idx] + tM1, tM1T, ri, rvti, 
pi = self._metrics_from_waveform( + v=v, Tbeat=Tbeat, ratio=ratio, eps=1e-12 + ) + + seg_idx = branch_idx * n_radii + radius_idx + tau_seg[beat_idx, seg_idx] = tM1 + tauT_seg[beat_idx, seg_idx] = tM1T + RI_seg[beat_idx, seg_idx] = ri + RVTI_seg[beat_idx, seg_idx] = rvti + PI_seg[beat_idx, seg_idx] = pi + + tau_b.append(tM1) + tauT_b.append(tM1T) + RI_b.append(ri) + RVTI_b.append(rvti) + PI_b.append(pi) + + tau_vals.append(tM1) + tauT_vals.append(tM1T) + RI_vals.append(ri) + RVTI_vals.append(rvti) + PI_vals.append(pi) + + # Branch aggregates: MEDIAN over radii + tau_branch[beat_idx, branch_idx] = self._safe_nanmedian( + np.asarray(tau_b) + ) + tauT_branch[beat_idx, branch_idx] = self._safe_nanmedian( + np.asarray(tauT_b) + ) + RI_branch[beat_idx, branch_idx] = self._safe_nanmedian(np.asarray(RI_b)) + PI_branch[beat_idx, branch_idx] = self._safe_nanmedian(np.asarray(PI_b)) + RVTI_branch[beat_idx, branch_idx] = self._safe_nanmedian( + np.asarray(RVTI_b) + ) + + # Global aggregates: MEAN over all branches & radii + tau_global[beat_idx] = self._safe_nanmean(np.asarray(tau_vals)) + tauT_global[beat_idx] = self._safe_nanmean(np.asarray(tauT_vals)) + RI_global[beat_idx] = self._safe_nanmean(np.asarray(RI_vals)) + RVTI_global[beat_idx] = self._safe_nanmean(np.asarray(RVTI_vals)) + PI_global[beat_idx] = self._safe_nanmean(np.asarray(PI_vals)) + + return ( + tau_seg, + tauT_seg, + RI_seg, + PI_seg, + RVTI_seg, + tau_branch, + tauT_branch, + RI_branch, + PI_branch, + RVTI_branch, + tau_global, + tauT_global, + RI_global, + PI_global, + RVTI_global, + n_branches, + n_radii, + ) + + def run(self, h5file) -> ProcessResult: + T = np.asarray(h5file[self.T_input]) + ratio_systole_diastole_R_VTI = 0.5 + + try: + v_raw_input = ( + "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegment/value" + ) + v_bandlimited_input = "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegmentBandLimited/value" + + v_raw = np.asarray(h5file[v_raw_input]) + v_band = 
np.asarray(h5file[v_bandlimited_input]) + v_raw = self._rectify_keep_nan(v_raw) + v_band = self._rectify_keep_nan(v_band) + + ( + tau_seg_b, + tauT_seg_b, + RI_seg_b, + PI_seg_b, + RVTI_seg_b, + tau_br_b, + tauT_br_b, + RI_br_b, + PI_br_b, + RVTI_br_b, + tau_gl_b, + tauT_gl_b, + RI_gl_b, + PI_gl_b, + RVTI_gl_b, + n_branches_b, + n_radii_b, + ) = self._compute_block(v_band, T, ratio_systole_diastole_R_VTI) + + ( + tau_seg_r, + tauT_seg_r, + RI_seg_r, + PI_seg_r, + RVTI_seg_r, + tau_br_r, + tauT_br_r, + RI_br_r, + PI_br_r, + RVTI_br_r, + tau_gl_r, + tauT_gl_r, + RI_gl_r, + PI_gl_r, + RVTI_gl_r, + n_branches_r, + n_radii_r, + ) = self._compute_block(v_raw, T, ratio_systole_diastole_R_VTI) + + # Consistency attributes (optional but useful) + seg_order_note = ( + "seg_idx = branch_idx * n_radii + radius_idx (branch-major flattening)" + ) + if n_radii_b != n_radii_r or n_branches_b != n_branches_r: + seg_order_note += ( + " | WARNING: raw/bandlimited branch/radius dims differ." + ) + + metrics = { + # --- Existing datasets (unchanged names/shapes) --- + "by_segment/tau_M1_bandlimited_segment": with_attrs( + tau_seg_b, + { + "unit": ["s"], + "definition": ["tau_M1 = M1/M0 on rectified waveform"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/tau_M1_over_T_bandlimited_segment": with_attrs( + tauT_seg_b, + { + "unit": [""], + "definition": ["tau_M1_over_T = (M1/M0)/T"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/RI_bandlimited_segment": with_attrs( + RI_seg_b, + { + "unit": [""], + "definition": ["RI = 1 - vmin/vmax (robust, rectified)"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/PI_bandlimited_segment": with_attrs( + PI_seg_b, + { + "unit": [""], + "definition": ["RI = 1 - vmin/vmax (robust, rectified)"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/R_VTI_bandlimited_segment": with_attrs( + RVTI_seg_b, + { + "unit": [""], + "definition": ["paper RVTI = D1/(D2+eps)"], + "segment_indexing": 
[seg_order_note], + }, + ), + "by_segment/tau_M1_raw_segment": with_attrs( + tau_seg_r, + { + "unit": ["s"], + "definition": ["tau_M1 = M1/M0 on rectified waveform"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/tau_M1_over_T_raw_segment": with_attrs( + tauT_seg_r, + { + "unit": [""], + "definition": ["tau_M1_over_T = (M1/M0)/T"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/RI_raw_segment": with_attrs( + RI_seg_r, + { + "unit": [""], + "definition": ["RI = 1 - vmin/vmax (robust, rectified)"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/PI_raw_segment": with_attrs( + PI_seg_r, + { + "unit": [""], + "definition": ["RI = 1 - vmin/vmax (robust, rectified)"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/R_VTI_raw_segment": with_attrs( + RVTI_seg_r, + { + "unit": [""], + "definition": ["paper RVTI = D1/(D2+eps)"], + "segment_indexing": [seg_order_note], + }, + ), + "by_segment/ratio_systole_diastole_R_VTI": np.asarray( + ratio_systole_diastole_R_VTI, dtype=float + ), + # --- New aggregated outputs --- + "by_segment/tau_M1_bandlimited_branch": with_attrs( + tau_br_b, + { + "unit": ["s"], + "definition": ["median over radii: tau_M1 per branch"], + }, + ), + "by_segment/tau_M1_over_T_bandlimited_branch": with_attrs( + tauT_br_b, + { + "unit": [""], + "definition": ["median over radii: tau_M1/T per branch"], + }, + ), + "by_segment/RI_bandlimited_branch": with_attrs( + RI_br_b, + {"unit": [""], "definition": ["median over radii: RI per branch"]}, + ), + "by_segment/PI_bandlimited_branch": with_attrs( + PI_br_b, + {"unit": [""], "definition": ["median over radii: RI per branch"]}, + ), + "by_segment/R_VTI_bandlimited_branch": with_attrs( + RVTI_br_b, + { + "unit": [""], + "definition": ["median over radii: paper RVTI per branch"], + }, + ), + "by_segment/tau_M1_bandlimited_global": with_attrs( + tau_gl_b, + { + "unit": ["s"], + "definition": ["mean over branches & radii: tau_M1 global"], + }, + ), + 
"by_segment/tau_M1_over_T_bandlimited_global": with_attrs( + tauT_gl_b, + { + "unit": [""], + "definition": ["mean over branches & radii: tau_M1/T global"], + }, + ), + "by_segment/RI_bandlimited_global": with_attrs( + RI_gl_b, + { + "unit": [""], + "definition": ["mean over branches & radii: RI global"], + }, + ), + "by_segment/PI_bandlimited_global": with_attrs( + PI_gl_b, + { + "unit": [""], + "definition": ["mean over branches & radii: RI global"], + }, + ), + "by_segment/R_VTI_bandlimited_global": with_attrs( + RVTI_gl_b, + { + "unit": [""], + "definition": ["mean over branches & radii: paper RVTI global"], + }, + ), + "by_segment/tau_M1_raw_branch": with_attrs( + tau_br_r, + { + "unit": ["s"], + "definition": ["median over radii: tau_M1 per branch"], + }, + ), + "by_segment/tau_M1_over_T_raw_branch": with_attrs( + tauT_br_r, + { + "unit": [""], + "definition": ["median over radii: tau_M1/T per branch"], + }, + ), + "by_segment/RI_raw_branch": with_attrs( + RI_br_r, + {"unit": [""], "definition": ["median over radii: RI per branch"]}, + ), + "by_segment/PI_raw_branch": with_attrs( + PI_br_r, + {"unit": [""], "definition": ["median over radii: RI per branch"]}, + ), + "by_segment/R_VTI_raw_branch": with_attrs( + RVTI_br_r, + { + "unit": [""], + "definition": ["median over radii: paper RVTI per branch"], + }, + ), + "by_segment/tau_M1_raw_global": with_attrs( + tau_gl_r, + { + "unit": ["s"], + "definition": ["mean over branches & radii: tau_M1 global"], + }, + ), + "by_segment/tau_M1_over_T_raw_global": with_attrs( + tauT_gl_r, + { + "unit": [""], + "definition": ["mean over branches & radii: tau_M1/T global"], + }, + ), + "by_segment/RI_raw_global": with_attrs( + RI_gl_r, + { + "unit": [""], + "definition": ["mean over branches & radii: RI global"], + }, + ), + "by_segment/PI_raw_global": with_attrs( + PI_gl_r, + { + "unit": [""], + "definition": ["mean over branches & radii: RI global"], + }, + ), + "by_segment/R_VTI_raw_global": with_attrs( + RVTI_gl_r, + { + 
"unit": [""], + "definition": ["mean over branches & radii: paper RVTI global"], + }, + ), + } + + except Exception: # noqa: BLE001 + metrics = {} + v_raw = np.asarray(h5file[self.v_raw_global_input]) + v_raw = np.maximum(v_raw, 0) + v_bandlimited = np.asarray(h5file[self.v_bandlimited_global_input]) + v_bandlimited = np.maximum(v_bandlimited, 0) + v_bandlimited_max = np.asarray(h5file[self.v_bandlimited_global_max_input]) + v_bandlimited_max = np.maximum(v_bandlimited_max, 0) + v_bandlimited_min = np.asarray(h5file[self.v_bandlimited_global_min_input]) + v_bandlimited_min = np.maximum(v_bandlimited_min, 0) + tau_M1_raw = [] + tau_M1_over_T_raw = [] + tau_M1_bandlimited = [] + tau_M1_over_T_bandlimited = [] + + R_VTI_bandlimited = [] + R_VTI_raw = [] + + RI_bandlimited = [] + RI_raw = [] + PI_bandlimited = [] + PI_raw = [] + + ratio_systole_diastole_R_VTI = 0.5 + + for beat_idx in range(len(T[0])): + t = T[0][beat_idx] / len(v_raw.T[beat_idx]) + D1_raw = np.sum( + v_raw.T[beat_idx][ + : int(np.ceil(len(v_raw.T[0]) * ratio_systole_diastole_R_VTI)) + ] + ) + D2_raw = np.sum( + v_raw.T[beat_idx][ + int(np.ceil(len(v_raw.T[0]) * ratio_systole_diastole_R_VTI)) : + ] + ) + D1_bandlimited = np.sum( + v_bandlimited.T[beat_idx][ + : int( + np.ceil(len(v_bandlimited.T[0]) * ratio_systole_diastole_R_VTI) + ) + ] + ) + D2_bandlimited = np.sum( + v_bandlimited.T[beat_idx][ + int( + np.ceil(len(v_bandlimited.T[0]) * ratio_systole_diastole_R_VTI) + ) : + ] + ) + R_VTI_bandlimited.append(D1_bandlimited / (D2_bandlimited + 10 ** (-12))) + R_VTI_raw.append(D1_raw / (D2_raw + 10 ** (-12))) + M_0 = np.sum(v_raw.T[beat_idx]) + M_1 = 0 + for time_idx in range(len(v_raw.T[beat_idx])): + M_1 += v_raw[time_idx][beat_idx] * time_idx * t + TM1 = M_1 / M_0 + tau_M1_raw.append(TM1) + tau_M1_over_T_raw.append(TM1 / T[0][beat_idx]) + + for beat_idx in range(len(T[0])): + t = T[0][beat_idx] / len(v_raw.T[beat_idx]) + M_0 = np.sum(v_bandlimited.T[beat_idx]) + M_1 = 0 + for time_idx in 
range(len(v_raw.T[beat_idx])): + M_1 += v_bandlimited[time_idx][beat_idx] * time_idx * t + TM1 = M_1 / M_0 + tau_M1_bandlimited.append(TM1) + tau_M1_over_T_bandlimited.append(TM1 / T[0][beat_idx]) + + for beat_idx in range(len(v_bandlimited_max[0])): + RI_bandlimited_temp = 1 - ( + np.min(v_bandlimited.T[beat_idx]) / np.max(v_bandlimited.T[beat_idx]) + ) + RI_bandlimited.append(RI_bandlimited_temp) + PI_bandlimited_temp = ( + np.max(v_bandlimited.T[beat_idx]) - np.min(v_bandlimited.T[beat_idx]) + ) / np.mean(v_bandlimited.T[beat_idx]) + PI_bandlimited.append(PI_bandlimited_temp) + + for beat_idx in range(len(v_bandlimited_max[0])): + RI_raw_temp = 1 - (np.min(v_raw.T[beat_idx]) / np.max(v_raw.T[beat_idx])) + RI_raw.append(RI_raw_temp) + PI_raw_temp = ( + np.max(v_raw.T[beat_idx]) - np.min(v_raw.T[beat_idx]) + ) / np.mean(v_raw.T[beat_idx]) + PI_raw.append(PI_raw_temp) + metrics.update( + { + "global/tau_M1_raw": with_attrs(np.asarray(tau_M1_raw), {"unit": [""]}), + "global/tau_M1_bandlimited": np.asarray(tau_M1_bandlimited), + "global/tau_M1_over_T_raw": with_attrs( + np.asarray(tau_M1_over_T_raw), {"unit": [""]} + ), + "global/tau_M1_over_T_bandlimited": np.asarray( + tau_M1_over_T_bandlimited + ), + "global/RI_bandlimited": np.asarray(RI_bandlimited), + "global/RI_raw": np.asarray(RI_raw), + "global/PI_raw": np.asarray(PI_raw), + "global/PI_bandlimited": np.asarray(PI_bandlimited), + "global/R_VTI_bandlimited": np.asarray(R_VTI_bandlimited), + "global/R_VTI_raw": np.asarray(R_VTI_raw), + "global/ratio_systole_diastole_R_VTI": np.asarray( + ratio_systole_diastole_R_VTI + ), + } + ) + return ProcessResult(metrics=metrics) diff --git a/src/pipelines/pulse_waveform_shape_metrics(harmonic analysis).py b/src/pipelines/pulse_waveform_shape_metrics(harmonic analysis).py new file mode 100644 index 0000000..7eaf3fd --- /dev/null +++ b/src/pipelines/pulse_waveform_shape_metrics(harmonic analysis).py @@ -0,0 +1,164 @@ +import numpy as np + +from .core.base import 
ProcessPipeline, ProcessResult, registerPipeline, with_attrs + + +@registerPipeline(name="Harmonic metrics") +class ArterialHarmonicAnalysis(ProcessPipeline): + """ + Tutorial pipeline showing the full surface area of a pipeline: + + - Subclass ProcessPipeline and implement `run(self, h5file) -> ProcessResult`. + - Return metrics (scalars, vectors, matrices, cubes) and optional artifacts. + - Attach HDF5 attributes to any metric via `with_attrs(data, attrs_dict)`. + - Add attributes to the pipeline group (`attrs`) or root file (`file_attrs`). + - No input data is required; this pipeline is purely illustrative. + """ + + description = "Tutorial: metrics + artifacts + dataset attrs + file/pipeline attrs." + v_raw = "/Artery/VelocityPerBeat/VelocitySignalPerBeat/value" + v = "/Artery/VelocityPerBeat/VelocitySignalPerBeatBandLimited/value" + T = "/Artery/VelocityPerBeat/beatPeriodSeconds/value" + + def run(self, h5file) -> ProcessResult: + + v_ds = np.asarray(h5file[self.v]) + t_ds = np.asarray(h5file[self.T]) + + N = len(v_ds[:, 0]) + nb_harmonic = 10 + Xn = [] + + CV_1 = [] + CV_2 = [] + + Vn_tot = [] + sigma_phase_tot = [] + v_hat = [] + CF_tot = [] + D_alpha = [] + + FVTI = [] + IVTI = [] + AVTI_tot = [] + + alpha = 0.5 # that needs to be specified + for k in range(len(v_ds[0])): + T = t_ds[0][k] + fft_vals = np.fft.fft(v_ds[:, k]) / N + limit = nb_harmonic + 1 + Vn = fft_vals[:limit] + V1 = Vn[1] + Xn_k = Vn / V1 + Xn.append(Xn_k) + Vn_tot.append(Vn) + + omega0 = (2 * np.pi) / T + t = np.linspace(0, T, N) + + v_hat_k = np.real(Vn[0]) * np.ones_like(t) + + for i in range(1, limit): + h = 2 * (Vn[i] * np.exp(1j * i * omega0 * t)).real + v_hat_k += h + v_hat.append(v_hat_k) + + RMST = np.sqrt(np.mean(v_hat[k] ** 2)) + CF = np.max(v_hat_k) / RMST + CF_tot.append(CF) + + amp1 = np.abs(np.asarray(Vn_tot)[:, 1]) + amp2 = np.abs(np.asarray(Vn_tot)[:, 2]) + + CV_1.append(np.std(amp1) / np.mean(amp1)) + CV_2.append(np.std(amp2) / np.mean(amp2)) + + phi1 = 
np.angle(np.asarray(Xn)[:, 1]) + phi2 = np.angle(np.asarray(Xn)[:, 2]) + + diff_phase = phi2 - 2 * phi1 + diff_phase_wrap = (diff_phase + np.pi) % (2 * np.pi) - np.pi + sigma_phase = np.std(diff_phase_wrap) + sigma_phase_tot.append(sigma_phase) + + seuil = Vn[0] + alpha * np.std(v_hat) + condition = v_hat > seuil + D_alpha.append(np.mean(condition)) + + v_base = np.min(v_hat[k]) + max = np.maximum(v_hat[k] - v_base, 0) + + dt = t[1] - t[0] + moitie = len(t) // 2 + d1 = np.sum(max[:moitie]) * dt + d2 = np.sum(max[moitie:]) * dt + AVTI = np.sum(max) * dt + AVTI_tot.append(AVTI) + FVTI.append(d1 / AVTI) + IVTI.append((d1 - d2) / (d1 + d2)) + + metrics = { + "Xn": with_attrs( + np.asarray(Xn), + { + "unit": [""], + }, + ), + "AVTI": with_attrs( + np.asarray(AVTI_tot), + { + "unit": [""], + }, + ), + "CV1 : Beat to beat amplitude stability (n=1)": with_attrs( + np.asarray(CV_1), + { + "unit": [""], + }, + ), + "CV2 : Beat to beat amplitude stability (n=2)": with_attrs( + np.asarray(CV_2), + { + "unit": [""], + }, + ), + "sigma : Beat to beat phase coupling stability (n=2)": with_attrs( + np.asarray(sigma_phase_tot), + { + "unit": [""], + }, + ), + "v_hat : Band limited waveform (definition)": with_attrs( + np.asarray(v_hat), + { + "unit": [""], + }, + ), + "CF : Band limited crest factor ": with_attrs( + np.asarray(CF_tot), + { + "unit": [""], + }, + ), + " D_alpha : Effective Duty cycle ": with_attrs( + np.asarray(D_alpha), + { + "unit": [""], + }, + ), + "FVTI : Normalised first half fraction ": with_attrs( + np.asarray(FVTI), + { + "unit": [""], + }, + ), + "IVTI : VTI asymmetry index ": with_attrs( + np.asarray(IVTI), + { + "unit": [""], + }, + ), + } + # Artifacts can store non-metric outputs (strings, paths, etc.). 
+ + return ProcessResult(metrics=metrics) diff --git a/src/pipelines/recreatesig.py b/src/pipelines/recreatesig.py index 7aca14a..db13b30 100644 --- a/src/pipelines/recreatesig.py +++ b/src/pipelines/recreatesig.py @@ -1,6 +1,6 @@ import numpy as np -from .core.base import ProcessPipeline, ProcessResult, registerPipeline, with_attrs +from .core.base import ProcessPipeline, ProcessResult, registerPipeline @registerPipeline(name="reconstruct") @@ -16,11 +16,9 @@ class Reconstruct(ProcessPipeline): """ description = "Tutorial: metrics + artifacts + dataset attrs + file/pipeline attrs." - v_profile = "/Artery/Velocity/VelocityProfiles/value" + v_profile = "/Artery/CrossSections/VelocityProfileSeg/value" vsystol = "/Artery/Velocity/SystolicAccelerationPeakIndexes" T_val = "/Artery/VelocityPerBeat/beatPeriodSeconds/value" - vmax = "/Artery/VelocityPerBeat/VmaxPerBeatBandLimited/value" - vmin = "/Artery/VelocityPerBeat/VminPerBeatBandLimited/value" def gaussian(x, A, mu, sigma, c): return A * np.exp(-((x - mu) ** 2) / (2 * sigma**2)) + c @@ -34,10 +32,8 @@ def run(self, h5file) -> ProcessResult: V_corrected = [] V_ceil = [] - # V_gauss = [] for k in range(len(v_seg[0, :, 0, 0])): - # VIT_Time = 0 Vit_br = [] for br in range(len(v_seg[0, k, :, 0])): v_branch = np.nanmean(v_seg[:, k, br, :], axis=1) @@ -81,9 +77,68 @@ def run(self, h5file) -> ProcessResult: Vit.append(np.nanmean(Vit_br)) V_corrected.append(np.nanmean(Vit)) + + """vraw_ds = np.asarray(v_threshold_beat_segment) + vraw_ds_temp = vraw_ds.transpose(1, 0, 2, 3) + vraw_ds = np.maximum(vraw_ds_temp, 0) + v_ds = vraw_ds + t_ds = np.asarray(h5file[self.T_input]) + + TMI_seg = [] + TMI_seg_band = [] + RTVI_seg = [] + RTVI_seg_band = [] + RI_seg = [] + RI_seg_band = [] + M0_seg = 0 + M1_seg = 0 + M0_seg_band = 0 + M1_seg_band = 0 + for k in range(len(vraw_ds[0, :, 0, 0])): + TMI_branch = [] + TMI_branch_band = [] + RTVI_band_branch = [] + RTVI_branch = [] + RI_branch = [] + RI_branch_band = [] + for i in 
range(len(vraw_ds[0, k, :, 0])): + avg_speed_band = np.nanmean(v_ds[:, k, i, :], axis=1) + avg_speed = np.nanmean(vraw_ds[:, k, i, :], axis=1) + vmin = np.min(avg_speed) + vmax = np.max(avg_speed) + vmin_band = np.min(avg_speed_band) + vmax_band = np.max(avg_speed_band) + + RI_branch.append(1 - (vmin / (vmax + 10 ** (-14)))) + RI_branch_band.append(1 - (vmin_band / (vmax_band + 10 ** (-14)))) + D1_raw = np.sum(avg_speed[:31]) + D2_raw = np.sum(avg_speed[32:]) + D1 = np.sum(avg_speed_band[:31]) + D2 = np.sum(avg_speed_band[32:]) + RTVI_band_branch.append(D1 / (D2 + 10 ** (-12))) + RTVI_branch.append(D1_raw / (D2_raw + 10 ** (-12))) + M0_seg += np.sum(avg_speed) + M0_seg_band += np.sum(avg_speed_band) + for j in range(len(avg_speed)): + M1_seg += avg_speed[j] * j * t_ds[0][k] / 64 + M1_seg_band += avg_speed_band[j] * j * t_ds[0][k] / 64 + if M0_seg != 0: + TMI_branch.append(M1_seg / (t_ds[0][k] * M0_seg)) + else: + TMI_branch.append(0) + if M0_seg_band != 0: + TMI_branch_band.append(M1_seg_band / (t_ds[0][k] * M0_seg_band)) + else: + TMI_branch_band.append(0) + + TMI_seg.append(TMI_branch) + TMI_seg_band.append(TMI_branch_band) + RI_seg.append(RI_branch) + RI_seg_band.append(RI_branch_band) + RTVI_seg.append(RTVI_branch) + RTVI_seg_band.append(RTVI_band_branch)""" for k in range(len(v_seg[0, :, 0, 0])): Vit = [] - # Vit_gauss = [] for br in range(len(v_seg[0, k, :, 0])): Vit_br = [] for seg in range(len(v_seg[0, k, br, :])): @@ -113,7 +168,7 @@ def run(self, h5file) -> ProcessResult: + values[last - threshold :] ) ) - except Exception: # noqa: BLE001 + except Exception: Vit_br.append(np.nan) return None @@ -123,27 +178,9 @@ def run(self, h5file) -> ProcessResult: # Metrics are the main numerical outputs; each key becomes a dataset under /pipelines//metrics. 
metrics = { - "Xn": with_attrs( - np.asarray(V), - { - "unit": [""], - "description": [""], - }, - ), - "Xn_correc": with_attrs( - np.asarray(V_corrected), - { - "unit": [""], - "description": [""], - }, - ), - "Xn_ceil": with_attrs( - np.asarray(V_ceil), - { - "unit": [""], - "description": [""], - }, - ), + "Xn": np.asarray(V), + "Xn_correc": np.asarray(V_corrected), + "Xn_ceil": np.asarray(V_ceil), } # Artifacts can store non-metric outputs (strings, paths, etc.). diff --git a/src/pipelines/segment_velocity_waveform_shape_metrics.py b/src/pipelines/segment_velocity_waveform_shape_metrics.py new file mode 100644 index 0000000..71878fb --- /dev/null +++ b/src/pipelines/segment_velocity_waveform_shape_metrics.py @@ -0,0 +1,238 @@ +import numpy as np + +from .core.base import ProcessPipeline, ProcessResult, registerPipeline, with_attrs + + +@registerPipeline(name="segment_waveform_shape_metrics") +class ArterialSegExample(ProcessPipeline): + """ + Tutorial pipeline showing the full surface area of a pipeline: + + - Subclass ProcessPipeline and implement `run(self, h5file) -> ProcessResult`. + - Return metrics (scalars, vectors, matrices, cubes) and optional artifacts. + - Attach HDF5 attributes to any metric via `with_attrs(data, attrs_dict)`. + - Add attributes to the pipeline group (`attrs`) or root file (`file_attrs`). + - No input data is required; this pipeline is purely illustrative. + """ + + description = "Tutorial: metrics + artifacts + dataset attrs + file/pipeline attrs." 
+ v_raw_input = ( + "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegment/value" + ) + v_bandlimited_input = "/Artery/VelocityPerBeat/Segments/VelocitySignalPerBeatPerSegmentBandLimited/value" + T_input = "/Artery/VelocityPerBeat/beatPeriodSeconds/value" + + def run(self, h5file) -> ProcessResult: + + v_raw = np.asarray(h5file[self.v_raw_input]) + v_raw = np.maximum(v_raw, 0) + + v_bandlimited = np.asarray(h5file[self.v_bandlimited_input]) + v_bandlimited = np.maximum(v_bandlimited, 0) + + T = np.asarray(h5file[self.T_input]) + + moment_0_segment = 0 + moment_1_segment = 0 + + Tau_M1_bandlimited_segment = [] + Tau_M1_over_T_bandlimited_segment = [] + RI_bandlimited_segment = [] + R_VTI_bandlimited_segment = [] + + Tau_M1_raw_segment = [] + Tau_M1_over_T_raw_segment = [] + RI_raw_segment = [] + R_VTI_raw_segment = [] + + ratio_systole_diastole_R_VTI = 0.5 + + for beat_idx in range(len(v_bandlimited[0, :, 0, 0])): + Tau_M1_bandlimited_branch = [] + Tau_M1_over_T_bandlimited_branch = [] + RI_bandlimited_branch = [] + R_VTI_bandlimited_branch = [] + + t = T[0][beat_idx] / len(v_bandlimited) + + for branch_idx in range(len(v_bandlimited[0, beat_idx, :, 0])): + Tau_M1_bandlimited_radius = [] + Tau_M1_over_T_bandlimited_radius = [] + RI_bandlimited_radius = [] + R_VTI_bandlimited_radius = [] + + for radius_idx in range(len(v_bandlimited[0, beat_idx, branch_idx, :])): + v_bandlimited_idx = v_bandlimited[ + :, beat_idx, branch_idx, radius_idx + ] + + moment_0_segment = np.sum(v_bandlimited_idx) + moment_1_segment = 0 + + for time_idx in range(len(v_bandlimited_idx)): + moment_1_segment += v_bandlimited_idx[time_idx] * time_idx * t + + TM1 = moment_1_segment / moment_0_segment + Tau_M1_bandlimited_radius.append(TM1) + Tau_M1_over_T_bandlimited_radius.append(TM1 / T[0][beat_idx]) + + v_bandlimited_max = np.max(v_bandlimited_idx) + v_bandlimited_min = np.min(v_bandlimited_idx) + + RI_bandlimited_radius_idx = 1 - ( + v_bandlimited_min / v_bandlimited_max + ) + 
RI_bandlimited_radius.append(RI_bandlimited_radius_idx) + + epsilon = 10 ** (-12) + D1_bandlimited = np.sum( + v_bandlimited_idx[ + : int( + np.ceil( + len(v_bandlimited_idx) + * ratio_systole_diastole_R_VTI + ) + ) + ] + ) + D2_bandlimited = np.sum( + v_bandlimited_idx[ + int( + np.ceil( + len(v_bandlimited_idx) + * ratio_systole_diastole_R_VTI + ) + ) : + ] + ) + R_VTI_bandlimited_radius.append( + D1_bandlimited / (D2_bandlimited + epsilon) + ) + + Tau_M1_bandlimited_branch.append(Tau_M1_bandlimited_radius) + Tau_M1_over_T_bandlimited_branch.append( + Tau_M1_over_T_bandlimited_radius + ) + RI_bandlimited_branch.append(RI_bandlimited_radius) + R_VTI_bandlimited_branch.append(R_VTI_bandlimited_radius) + + Tau_M1_bandlimited_segment.append(Tau_M1_bandlimited_branch) + Tau_M1_over_T_bandlimited_segment.append(Tau_M1_over_T_bandlimited_branch) + RI_bandlimited_segment.append(RI_bandlimited_branch) + R_VTI_bandlimited_segment.append(R_VTI_bandlimited_branch) + + for beat_idx in range(len(v_raw[0, :, 0, 0])): + Tau_M1_raw_branch = [] + Tau_M1_over_T_raw_branch = [] + RI_raw_branch = [] + R_VTI_raw_branch = [] + + t = T[0][beat_idx] / len(v_raw) + + for branch_idx in range(len(v_raw[0, beat_idx, :, 0])): + Tau_M1_raw_radius = [] + Tau_M1_over_T_raw_radius = [] + RI_raw_radius = [] + R_VTI_raw_radius = [] + + for radius_idx in range(len(v_raw[0, beat_idx, branch_idx, :])): + v_raw_idx = v_raw[:, beat_idx, branch_idx, radius_idx] + + moment_0_segment = np.sum(v_raw_idx) + moment_1_segment = 0 + for time_idx in range(len(v_raw_idx)): + moment_1_segment += v_raw_idx[time_idx] * time_idx * t + + TM1 = moment_1_segment / moment_0_segment + Tau_M1_raw_radius.append(TM1) + Tau_M1_over_T_raw_radius.append(TM1 / T[0][beat_idx]) + + v_raw_max = np.max(v_raw_idx) + v_raw_min = np.min(v_raw_idx) + + RI_raw_radius_idx = 1 - (v_raw_min / v_raw_max) + RI_raw_radius.append(RI_raw_radius_idx) + + epsilon = 10 ** (-12) + D1_raw = np.sum( + v_raw_idx[ + : int( + np.ceil(len(v_raw_idx) * 
ratio_systole_diastole_R_VTI) + ) + ] + ) + D2_raw = np.sum( + v_raw_idx[ + int( + np.ceil(len(v_raw_idx) * ratio_systole_diastole_R_VTI) + ) : + ] + ) + R_VTI_raw_radius.append(D1_raw / (D2_raw + epsilon)) + + Tau_M1_raw_branch.append(Tau_M1_raw_radius) + Tau_M1_over_T_raw_branch.append(Tau_M1_over_T_raw_radius) + RI_raw_branch.append(RI_raw_radius) + R_VTI_raw_branch.append(R_VTI_raw_radius) + + Tau_M1_raw_segment.append(Tau_M1_raw_branch) + Tau_M1_over_T_raw_segment.append(Tau_M1_over_T_raw_branch) + RI_raw_segment.append(RI_raw_branch) + R_VTI_raw_segment.append(R_VTI_raw_branch) + + # Metrics are the main numerical outputs; each key becomes a dataset under /pipelines//metrics. + metrics = { + "tau_M1_bandlimited_segment": with_attrs( + np.asarray(Tau_M1_bandlimited_segment), + { + "unit": [""], + }, + ), + "R_VTI_bandlimited_segment": with_attrs( + np.asarray(R_VTI_bandlimited_segment), + { + "unit": [""], + }, + ), + "RI_bandlimited_segment": with_attrs( + np.asarray(RI_bandlimited_segment), + { + "unit": [""], + }, + ), + "tau_M1_over_T_bandlimited_segment": with_attrs( + np.asarray(Tau_M1_over_T_bandlimited_segment), + { + "unit": [""], + }, + ), + "tau_M1_raw_segment": with_attrs( + np.asarray(Tau_M1_raw_segment), + { + "unit": [""], + }, + ), + "R_VTI_raw_segment": with_attrs( + np.asarray(R_VTI_raw_segment), + { + "unit": [""], + }, + ), + "RI_raw_segment": with_attrs( + np.asarray(RI_raw_segment), + { + "unit": [""], + }, + ), + "tau_M1_over_T_raw_segment": with_attrs( + np.asarray(Tau_M1_over_T_raw_segment), + { + "unit": [""], + }, + ), + "ratio_systole_diastole_R_VTI": np.asarray(ratio_systole_diastole_R_VTI), + } + + # Artifacts can store non-metric outputs (strings, paths, etc.). 
+ + return ProcessResult(metrics=metrics) diff --git a/src/pipelines/signal_reconstruction_factor_reduced.py b/src/pipelines/signal_reconstruction_factor_reduced.py new file mode 100644 index 0000000..0a19b84 --- /dev/null +++ b/src/pipelines/signal_reconstruction_factor_reduced.py @@ -0,0 +1,563 @@ +import numpy as np + +from .core.base import ProcessPipeline, ProcessResult, registerPipeline, with_attrs + + +@registerPipeline(name="signal_reconstruction") +class Reconstruct(ProcessPipeline): + """ + Tutorial pipeline showing the full surface area of a pipeline: + + - Subclass ProcessPipeline and implement `run(self, h5file) -> ProcessResult`. + - Return metrics (scalars, vectors, matrices, cubes) and optional artifacts. + - Attach HDF5 attributes to any metric via `with_attrs(data, attrs_dict)`. + - Add attributes to the pipeline group (`attrs`) or root file (`file_attrs`). + - No input data is required; this pipeline is purely illustrative. + """ + + description = "Tutorial: metrics + artifacts + dataset attrs + file/pipeline attrs." 
+ v_profile = "/Artery/CrossSections/VelocityProfileSeg/value" + v_profile_interp_onebeat = ( + "/Artery/CrossSections/VelocityProfilesSegInterpOneBeat/value" + ) + vsystol = "/Artery/Velocity/SystolicAccelerationPeakIndexes" + T_input = "/Artery/VelocityPerBeat/beatPeriodSeconds/value" + systole_idx_input = "/Artery/WaveformAnalysis/SystoleIndices/value" + + def gaussian(x, A, mu, sigma, c): + return A * np.exp(-((x - mu) ** 2) / (2 * sigma**2)) + c + + def run(self, h5file) -> ProcessResult: + v_seg = np.maximum(np.asarray(h5file[self.v_profile]), 0) + v_interp_onebeat = np.maximum( + np.asarray(h5file[self.v_profile_interp_onebeat]), 0 + ) + systole_idx = np.asarray(h5file[self.systole_idx_input]) + T = np.asarray(h5file[self.T_input]) + + V = [] + v_profile_beat_threshold = [] + v_profile_beat_ceiled_threshold = [] + v_profile_beat_cropped_threshold = [] + + for beat in range(len(T[0])): + vit_beat = [] + for time_idx in range( + int(systole_idx[0][beat]), int(systole_idx[0][beat + 1]) + ): + Vit_br = [] + for br in range(len(v_seg[0, time_idx, :, 0])): + vit_seg = [] + for segment in range(len(v_seg[0, time_idx, br, :])): + vit_seg.append(v_seg[:, time_idx, br, segment]) + Vit_br.append(vit_seg) + + vit_beat.append(Vit_br) + V.append(vit_beat) + threshold = 6 + + # Metrics are the main numerical outputs; each key becomes a dataset under /pipelines//metrics. 
+ """for threshold_idx in range(threshold + 1): + v_profile_beat = [] + v_profile_beat_ceiled = [] + v_profile_beat_cropped = [] + for beat in range(len(T[0])): + vit_beat = [] + vit_beat_ceiled = [] + vit_beat_cropped = [] + v_raw_temp = np.asarray(V[beat]) + for time_idx in range(len(v_raw_temp[:, 0, 0, 0])): + vit_br = [] + vit_br_ceiled = [] + vit_br_cropped = [] + for br in range(len(v_raw_temp[time_idx, :, 0, 0])): + vit_seg = [] + vit_seg_ceiled = [] + vit_seg_cropped = [] + for segment in range(len(v_raw_temp[time_idx, br, :, 0])): + values_temp = v_raw_temp[time_idx, br, segment, :] + values = list(values_temp) + try: + first = values.index( + next(filter(lambda x: str(x) != "nan", values)) + ) + + other = values[ + np.minimum( + values.index( + next( + filter( + lambda x: str(x) != "nan", values + ) + ) + ), + 17, + ) : + ] + last = first + other.index( + next(filter(lambda x: str(x) == "nan", other)) + ) + + ceil_completion = [ + values[first + threshold_idx - 1] + for v in values[ + first + threshold_idx : last - threshold_idx + ] + ] + v_seg_band_ceiled = ( + values[first : first + threshold_idx] + + ceil_completion + + values[last - threshold_idx : last] + ) + vit_seg_cropped.append( + np.nanmean( + values[first : first + threshold_idx] + + values[last - threshold_idx : last] + ) + ) + vit_seg_ceiled.append(np.nanmean(v_seg_band_ceiled)) + vit_seg.append(np.nanmean(values_temp)) + except Exception: # noqa: BLE001 + vit_seg_cropped.append(np.nan) + vit_seg_ceiled.append(np.nan) + vit_seg.append(np.nanmean(values_temp)) + continue + + vit_br.append(vit_seg) + vit_br_ceiled.append(vit_seg_ceiled) + vit_br_cropped.append(vit_seg_cropped) + + vit_beat.append(vit_br) + vit_beat_ceiled.append(vit_br_ceiled) + vit_beat_cropped.append(vit_br_cropped) + + v_profile_beat.append(vit_beat) + v_profile_beat_ceiled.append(vit_beat_ceiled) + v_profile_beat_cropped.append(vit_beat_cropped) + v_profile_beat_threshold.append(v_profile_beat) + 
v_profile_beat_ceiled_threshold.append(v_profile_beat_ceiled) + v_profile_beat_cropped_threshold.append(v_profile_beat_cropped) + target_len = 128 + n_beats = len(v_profile_beat) + n_branches = len(v_profile_beat[0][0]) + n_segments = len(v_profile_beat[0][0][0]) + v_threshold_beat_segment = np.zeros( + (threshold + 1, n_beats, target_len, n_branches, n_segments) + ) + v_threshold_beat_segment_cropped = np.zeros( + (threshold + 1, n_beats, target_len, n_branches, n_segments) + ) + v_threshold_beat_segment_ceiled = np.zeros( + (threshold + 1, n_beats, target_len, n_branches, n_segments) + ) + for threshold_idx in range(threshold + 1): + for beat in range(n_beats): + beat_data = np.asarray(v_profile_beat_threshold[threshold_idx][beat]) + beat_data_ceiled = np.asarray( + v_profile_beat_ceiled_threshold[threshold_idx][beat] + ) + beat_data_cropped = np.asarray( + v_profile_beat_cropped_threshold[threshold_idx][beat] + ) + # shape: (time_len, branches, segments) + + time_len = beat_data.shape[0] + + old_indices = np.linspace(0, 1, time_len) + new_indices = np.linspace(0, 1, target_len) + + for br in range(n_branches): + for seg in range(n_segments): + signal = beat_data[:, br, seg] + signal_ceiled = beat_data_ceiled[:, br, seg] + signal_cropped = beat_data_cropped[:, br, seg] + + new_values = np.interp(new_indices, old_indices, signal) + new_values_ceiled = np.interp( + new_indices, old_indices, signal_ceiled + ) + new_values_cropped = np.interp( + new_indices, old_indices, signal_cropped + ) + + v_threshold_beat_segment[threshold_idx, beat, :, br, seg] = ( + new_values + ) + v_threshold_beat_segment_cropped[ + threshold_idx, beat, :, br, seg + ] = new_values_ceiled + v_threshold_beat_segment_ceiled[ + threshold_idx, beat, :, br, seg + ] = new_values_cropped""" + v_raw_temp = np.asarray(v_interp_onebeat) + for threshold_idx in range(threshold + 1): + v_profile = [] + v_profile_ceiled = [] + v_profile_cropped = [] + + for time_idx in range(len(v_raw_temp[:, 0, 0, 0])): + 
vit_br = [] + vit_br_ceiled = [] + vit_br_cropped = [] + for br in range(len(v_raw_temp[time_idx, 0, :, 0])): + vit_seg = [] + vit_seg_ceiled = [] + vit_seg_cropped = [] + for segment in range(len(v_raw_temp[time_idx, 0, 0, :])): + values_temp = v_raw_temp[time_idx, :, br, segment] + values = list(values_temp) + try: + first = values.index( + next(filter(lambda x: str(x) != "nan", values)) + ) + + other = values[ + np.minimum( + values.index( + next(filter(lambda x: str(x) != "nan", values)) + ), + 17, + ) : + ] + last = first + other.index( + next(filter(lambda x: str(x) == "nan", other)) + ) + len_signal = last - first + ceil_completion = [ + values[first + threshold_idx - 1] + for v in values[ + int( + np.minimum( + first + threshold_idx, + first + np.floor(len_signal / 3), + ) + ) : int( + np.maximum( + last - threshold_idx, + last - np.ceil(len_signal * 2 / 3), + ) + ) + ] + ] + v_seg_band_ceiled = ( + values[ + first : int( + np.minimum( + first + threshold_idx, + first + np.floor(len_signal / 3), + ) + ) + ] + + ceil_completion + + values[ + int( + np.maximum( + last - threshold_idx, + last - np.ceil(len_signal * 2 / 3), + ) + ) : last + ] + ) + vit_seg_cropped.append( + np.nanmean( + values[ + : int( + np.minimum( + first + threshold_idx, + first + np.floor(len_signal / 3), + ) + ) + ] + + values[ + int( + np.maximum( + last - threshold_idx, + last - np.ceil(len_signal * 2 / 3), + ) + ) : + ] + ) + ) + vit_seg_ceiled.append(np.nanmean(v_seg_band_ceiled)) + vit_seg.append(np.nanmean(values_temp)) + except Exception: # noqa: BLE001 + vit_seg_cropped.append(np.nan) + vit_seg_ceiled.append(np.nan) + vit_seg.append(np.nanmean(values_temp)) + continue + + vit_br.append(vit_seg) + vit_br_ceiled.append(vit_seg_ceiled) + vit_br_cropped.append(vit_seg_cropped) + + v_profile.append(vit_br) + v_profile_ceiled.append(vit_br_ceiled) + v_profile_cropped.append(vit_br_cropped) + v_profile_beat_threshold.append(v_profile) + 
v_profile_beat_ceiled_threshold.append(v_profile_ceiled) + v_profile_beat_cropped_threshold.append(v_profile_cropped) + v_raw = np.asarray(v_profile_beat_threshold) + v_raw = np.maximum(v_raw, 0) + v_raw_ceiled = np.asarray(v_profile_beat_ceiled_threshold) + v_raw_ceiled = np.maximum(v_raw_ceiled, 0) + v_raw_cropped = np.asarray(v_profile_beat_cropped_threshold) + v_raw_cropped = np.maximum(v_raw_cropped, 0) + + moment_1_segment = 0 + + moment_1_segment_cropped = 0 + + moment_1_segment_ceiled = 0 + + Tau_M1_raw_segment = [] + Tau_M1_over_T_raw_segment = [] + RI_raw_segment = [] + R_VTI_raw_segment = [] + + Tau_M1_raw_segment_cropped = [] + Tau_M1_over_T_raw_segment_cropped = [] + RI_raw_segment_cropped = [] + R_VTI_raw_segment_cropped = [] + + Tau_M1_raw_segment_ceiled = [] + Tau_M1_over_T_raw_segment_ceiled = [] + RI_raw_segment_ceiled = [] + R_VTI_raw_segment_ceiled = [] + + ratio_systole_diastole_R_VTI = 0.5 + + for threshold_idx in range(len(v_raw[:, 0, 0, 0])): + Tau_M1_raw_global = [] + Tau_M1_over_T_raw_global = [] + RI_raw_global = [] + R_VTI_raw_global = [] + + Tau_M1_raw_global_ceiled = [] + Tau_M1_over_T_raw_global_ceiled = [] + RI_raw_global_ceiled = [] + R_VTI_raw_global_ceiled = [] + + Tau_M1_raw_global_cropped = [] + Tau_M1_over_T_raw_global_cropped = [] + RI_raw_global_cropped = [] + R_VTI_raw_global_cropped = [] + + for branch_idx in range(len(v_raw[threshold_idx, 0, :, 0])): + Tau_M1_raw_branch = [] + Tau_M1_over_T_raw_branch = [] + RI_raw_branch = [] + R_VTI_raw_branch = [] + + Tau_M1_raw_branch_ceiled = [] + Tau_M1_over_T_raw_branch_ceiled = [] + RI_raw_branch_ceiled = [] + R_VTI_raw_branch_ceiled = [] + + Tau_M1_raw_branch_cropped = [] + Tau_M1_over_T_raw_branch_cropped = [] + RI_raw_branch_cropped = [] + R_VTI_raw_branch_cropped = [] + for _radius_idx in range(len(v_raw[threshold_idx, 0, 0, :])): + v_raw_average = np.nanmean( + v_raw[threshold_idx, :, branch_idx, :], axis=1 + ) + v_raw_ceiled_average = np.nanmean( + v_raw_ceiled[threshold_idx, 
:, branch_idx, :], axis=1 + ) + v_raw_cropped_average = np.nanmean( + v_raw_cropped[threshold_idx, :, branch_idx, :], axis=1 + ) + t = T[0][0] / len(v_raw_average) + + moment_0_segment = np.nansum(v_raw_average) + moment_0_segment_cropped = np.nansum(v_raw_cropped_average) + moment_0_segment_ceiled = np.nansum(v_raw_ceiled_average) + moment_1_segment = 0 + + moment_1_segment_cropped = 0 + + moment_1_segment_ceiled = 0 + for time_idx in range(len(v_raw_average)): + moment_1_segment += v_raw_average[time_idx] * time_idx * t + + moment_1_segment_cropped += ( + v_raw_cropped_average[time_idx] * time_idx * t + ) + + moment_1_segment_ceiled += ( + v_raw_ceiled_average[time_idx] * time_idx * t + ) + + if moment_0_segment != 0: + TM1 = moment_1_segment / moment_0_segment + Tau_M1_raw_branch.append(TM1) + Tau_M1_over_T_raw_branch.append(TM1 / T[0][0]) + else: + Tau_M1_raw_branch.append(0) + Tau_M1_over_T_raw_branch.append(0) + if moment_0_segment_cropped != 0: + TM1_cropped = ( + moment_1_segment_cropped / moment_0_segment_cropped + ) + Tau_M1_raw_branch_cropped.append(TM1_cropped) + Tau_M1_over_T_raw_branch_cropped.append(TM1_cropped / T[0][0]) + else: + Tau_M1_raw_branch_cropped.append(0) + Tau_M1_over_T_raw_branch_cropped.append(0) + if moment_0_segment_ceiled != 0: + TM1_ceiled = moment_1_segment_ceiled / moment_0_segment_ceiled + Tau_M1_raw_branch_ceiled.append(TM1_ceiled) + Tau_M1_over_T_raw_branch_ceiled.append( + TM1_ceiled / np.nanmean(T[0][:]) + ) + else: + Tau_M1_raw_branch_ceiled.append(0) + Tau_M1_over_T_raw_branch_ceiled.append(0) + + v_raw_max = np.max(v_raw_average) + v_raw_min = np.min(v_raw_average) + v_raw_max_cropped = np.max(v_raw_cropped_average) + v_raw_min_cropped = np.min(v_raw_cropped_average) + v_raw_max_ceiled = np.max(v_raw_ceiled_average) + v_raw_min_ceiled = np.min(v_raw_ceiled_average) + + RI_raw_branch_idx = 1 - (v_raw_min / v_raw_max) + RI_raw_branch.append(RI_raw_branch_idx) + RI_raw_branch_idx_cropped = 1 - ( + v_raw_min_cropped / 
v_raw_max_cropped + ) + RI_raw_branch_cropped.append(RI_raw_branch_idx_cropped) + RI_raw_branch_idx_ceiled = 1 - (v_raw_min_ceiled / v_raw_max_ceiled) + RI_raw_branch_ceiled.append(RI_raw_branch_idx_ceiled) + + epsilon = 10 ** (-12) + D1_raw = np.sum( + v_raw_average[ + : int( + np.ceil( + len(v_raw_average) * ratio_systole_diastole_R_VTI + ) + ) + ] + ) + D1_raw_cropped = np.sum( + v_raw_cropped_average[ + : int( + np.ceil( + len(v_raw_cropped_average) + * ratio_systole_diastole_R_VTI + ) + ) + ] + ) + D1_raw_ceiled = np.sum( + v_raw_ceiled_average[ + : int( + np.ceil( + len(v_raw_ceiled_average) + * ratio_systole_diastole_R_VTI + ) + ) + ] + ) + D2_raw = np.sum( + v_raw_average[ + int( + np.ceil( + len(v_raw_average) * ratio_systole_diastole_R_VTI + ) + ) : + ] + ) + D2_raw_cropped = np.sum( + v_raw_cropped_average[ + int( + np.ceil( + len(v_raw_cropped_average) + * ratio_systole_diastole_R_VTI + ) + ) : + ] + ) + D2_raw_ceiled = np.sum( + v_raw_ceiled_average[ + int( + np.ceil( + len(v_raw_ceiled_average) + * ratio_systole_diastole_R_VTI + ) + ) : + ] + ) + R_VTI_raw_branch.append(D1_raw / (D2_raw + epsilon)) + R_VTI_raw_branch_cropped.append( + D1_raw_cropped / (D2_raw_cropped + epsilon) + ) + R_VTI_raw_branch_ceiled.append( + D1_raw_ceiled / (D2_raw_ceiled + epsilon) + ) + + Tau_M1_raw_global.append(Tau_M1_raw_branch) + Tau_M1_over_T_raw_global.append(Tau_M1_over_T_raw_branch) + RI_raw_global.append(RI_raw_branch) + R_VTI_raw_global.append(R_VTI_raw_branch) + + Tau_M1_raw_global_cropped.append(Tau_M1_raw_branch_cropped) + Tau_M1_over_T_raw_global_cropped.append( + Tau_M1_over_T_raw_branch_cropped + ) + RI_raw_global_cropped.append(RI_raw_branch_cropped) + R_VTI_raw_global_cropped.append(R_VTI_raw_branch_cropped) + + Tau_M1_raw_global_ceiled.append(Tau_M1_raw_branch_ceiled) + Tau_M1_over_T_raw_global_ceiled.append(Tau_M1_over_T_raw_branch_ceiled) + RI_raw_global_ceiled.append(RI_raw_branch_ceiled) + R_VTI_raw_global_ceiled.append(R_VTI_raw_branch_ceiled) + 
Tau_M1_raw_segment.append(Tau_M1_raw_global) + Tau_M1_over_T_raw_segment.append(Tau_M1_over_T_raw_global) + RI_raw_segment.append(RI_raw_global) + R_VTI_raw_segment.append(R_VTI_raw_global) + + Tau_M1_raw_segment_cropped.append(Tau_M1_raw_global_cropped) + Tau_M1_over_T_raw_segment_cropped.append(Tau_M1_over_T_raw_global_cropped) + RI_raw_segment_cropped.append(RI_raw_global_cropped) + R_VTI_raw_segment_cropped.append(R_VTI_raw_global_cropped) + + Tau_M1_raw_segment_ceiled.append(Tau_M1_raw_global_ceiled) + Tau_M1_over_T_raw_segment_ceiled.append(Tau_M1_over_T_raw_global_ceiled) + RI_raw_segment_ceiled.append(RI_raw_global_ceiled) + R_VTI_raw_segment_ceiled.append(R_VTI_raw_global_ceiled) + + metrics = { + "signals/v_profile": np.asarray(v_profile_beat_threshold), + "signals/v_profile_cropped": np.asarray(v_profile_beat_ceiled_threshold), + "signals/v_profile_ceiled": np.asarray(v_profile_beat_cropped_threshold), + "tau_M1/tau_M1_raw": with_attrs( + np.asarray(Tau_M1_raw_segment), {"unit": [""]} + ), + "tau_M1_over_T/tau_M1_over_T_raw": with_attrs( + np.asarray(Tau_M1_over_T_raw_segment), {"unit": [""]} + ), + "RI/RI_raw": np.asarray(RI_raw_segment), + "R_VTI/R_VTI_raw": np.asarray(R_VTI_raw_segment), + "tau_M1/tau_M1_raw_cropped": with_attrs( + np.asarray(Tau_M1_raw_segment_cropped), {"unit": [""]} + ), + "tau_M1_over_T/tau_M1_over_T_raw_cropped": with_attrs( + np.asarray(Tau_M1_over_T_raw_segment_cropped), {"unit": [""]} + ), + "RI/RI_raw_cropped": np.asarray(RI_raw_segment_cropped), + "R_VTI/R_VTI_raw_cropped": np.asarray(R_VTI_raw_segment_cropped), + "tau_M1/tau_M1_raw_ceiled": with_attrs( + np.asarray(Tau_M1_raw_segment_ceiled), {"unit": [""]} + ), + "tau_M1_over_T/tau_M1_over_T_raw_ceiled": with_attrs( + np.asarray(Tau_M1_over_T_raw_segment_ceiled), {"unit": [""]} + ), + "RI/RI_raw_ceiled": np.asarray(RI_raw_segment_ceiled), + "R_VTI/R_VTI_raw_ceiled": np.asarray(R_VTI_raw_segment_ceiled), + } + + # Artifacts can store non-metric outputs (strings, paths, 
etc.). + + return ProcessResult(metrics=metrics)