Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
7a99cee
add euclid dilation
robert-graf Mar 24, 2026
6d37745
add automapping if spineps properties are set
robert-graf Mar 24, 2026
fc875f1
update make_quadrants
robert-graf Mar 24, 2026
ea828ca
add stl generation to nii object
robert-graf Mar 24, 2026
b8ea462
save nrrd for .save call
robert-graf Mar 24, 2026
aa28c33
reg
robert-graf Mar 24, 2026
9be933d
set up itk_coords when loading
robert-graf Mar 24, 2026
d3ae39d
Merge branch 'development_robert' of github.com:Hendrik-code/TPTBox i…
robert-graf Mar 24, 2026
1a82ecd
update treg
robert-graf May 5, 2026
4d248e1
update dicom extract for DSA
robert-graf May 5, 2026
5a7053e
add euclid erode
robert-graf May 5, 2026
0e352f1
update nii functions
robert-graf May 5, 2026
d5556ce
speed up infect
robert-graf May 5, 2026
4782ead
speed up poi gen, fix bug that empty pois are not reoriented
robert-graf May 5, 2026
0d07c14
speed up poi circle gen and fix non rotation for empty pois
robert-graf May 5, 2026
4002140
update comment add feet
robert-graf May 5, 2026
b561527
add logger so spineps can properly use the print outs
robert-graf May 5, 2026
9287293
add elastic deform
robert-graf May 5, 2026
41a7d77
add key for selecting nnUnet strings
robert-graf May 5, 2026
3f7847e
Merge branch 'development_robert' of github.com:Hendrik-code/TPTBox i…
robert-graf May 5, 2026
f409e35
ruff
robert-graf May 5, 2026
b17456f
ruff
robert-graf May 5, 2026
8f6b780
requested changes
robert-graf May 6, 2026
a2e3c54
Merge branch 'development_robert' of github.com:Hendrik-code/TPTBox i…
robert-graf May 6, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions TPTBox/core/bids_constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@
"FLASH",
"VF",
"defacemas",
"fluroscopy",
"dw",
"TB1TFL",
"TB1RFM",
Expand Down
6 changes: 5 additions & 1 deletion TPTBox/core/bids_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -632,16 +632,20 @@ def rename_files(self, path: Path | str, ending=".nii.gz"):
p = Path(path + "." + key)
value.rename(p)

def symlink_files(self, path: Path | str, ending=".nii.gz"):
def symlink_files(self, path: Path | str, ending=".nii.gz", exist_ok=False):
    """Create symlinks for every file of this BIDS entry under a new base path.

    For each (suffix-key, source-path) pair in ``self.file`` a symlink
    ``<path without ending>.<key>`` is created that points at the source file.

    Args:
        path: Target path ending in ``ending``; the ending is stripped and the
            per-key suffix is appended instead.
        ending: Expected file ending of ``path`` (a leading "." is added if missing).
        exist_ok: If True, silently skip targets that already exist as regular files.

    Raises:
        AssertionError: If ``path`` does not end with ``ending``, or an existing
            symlink at a target location points to a different source file.
    """
    ending = ending if ending[0] == "." else "." + ending
    path = str(path)
    assert path.endswith(ending), f"set 'ending' to the part after the '.'\n {path} does not end with {ending}"
    # NOTE(review): str.replace removes EVERY occurrence of `ending` in the path,
    # not only the trailing suffix — verify no parent folder can contain it.
    path = path.replace(ending, "")
    for key, value in self.file.items():
        p = Path(path + "." + key)

        # An existing symlink is accepted only if it already points at the same source.
        if os.path.islink(p):
            assert Path(os.readlink(p)) == value, f"{p} exists"
            continue
        # With exist_ok, an existing regular file at the target is left untouched.
        if exist_ok and p.exists():
            continue

        os.symlink(value, p)

def get_path_decomposed(self, file_type=None) -> tuple[Path, str, str, str]:
Expand Down
2 changes: 2 additions & 0 deletions TPTBox/core/dicom/dicom2nii_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,6 +239,8 @@ def test_name_conflict(json_ob, file):
if Path(file).exists():
with open(file) as f:
js = json.load(f)
if "grid" in js:
del js["grid"]
return js != json_ob
return False

Expand Down
69 changes: 66 additions & 3 deletions TPTBox/core/dicom/dicom_extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,8 +88,42 @@ def _generate_bids_path(
return fname.file["json"], fname


def dicom_to_nifti_multiframe_2d(ds, nii_path, pixel_array):
    """Save a single-frame (2D) DICOM pixel array as a NIfTI file with one slice.

    Builds a 4x4 affine from the DICOM header when possible:
    PixelSpacing (+ optional ImageOrientationPatient / ImagePositionPatient),
    falling back to ImagerPixelSpacing, and finally to identity.

    Args:
        ds: pydicom dataset providing the spacing/orientation/position tags.
        nii_path: Output path for the NIfTI file.
        pixel_array: 2D pixel data (rows x columns) extracted from `ds`.

    Returns:
        The `nii_path` that was written.
    """
    if hasattr(ds, "PixelSpacing"):
        # DICOM PixelSpacing is (row spacing, column spacing) -> (dy, dx).
        dy, dx = map(float, ds.PixelSpacing)
        affine = np.eye(4)

        if hasattr(ds, "ImageOrientationPatient"):
            # First three values: direction cosines of a row; last three: of a column.
            orientation = list(map(float, ds.ImageOrientationPatient))
            row_cosines = np.array(orientation[:3])
            col_cosines = np.array(orientation[3:])
            # NOTE(review): DICOM coordinates are LPS while NIfTI expects RAS;
            # this branch applies no sign flip (unlike the ImagerPixelSpacing
            # branch below, which negates x/y) — confirm this is intended.
            affine[:3, 0] = row_cosines * dx
            affine[:3, 1] = col_cosines * dy
        else:
            affine[0, 0] = dx
            affine[1, 1] = dy

        if hasattr(ds, "ImagePositionPatient"):
            affine[:3, 3] = np.array(list(map(float, ds.ImagePositionPatient)))

    elif hasattr(ds, "ImagerPixelSpacing"):
        # Detector (not patient) spacing; negated x/y for the LPS->RAS convention.
        dy, dx = map(float, ds.ImagerPixelSpacing)
        affine = np.diag([-dx, -dy, 1, 1])

    else:
        # No spacing information available: fall back to unit spacing.
        affine = np.eye(4)

    # Transpose rows/columns into (x, y) order and add a singleton z axis.
    nii = nib.Nifti1Image(pixel_array.T[:, :, None], affine)
    logger.on_log("Save 2D", nii_path)
    nib.save(nii, nii_path)
    return nii_path


def dicom_to_nifti_multiframe(ds, nii_path):
pixel_array = ds.pixel_array
if len(pixel_array.shape) == 2:
return dicom_to_nifti_multiframe_2d(ds, nii_path, pixel_array)

if len(pixel_array.shape) != 3 and len(pixel_array.shape) != 4:
raise ValueError(f"Expected a shape with 3 colums not {len(pixel_array.shape)}; {pixel_array.shape=}")
n_frames = pixel_array.shape[0]
Expand Down Expand Up @@ -265,7 +299,22 @@ def _from_dicom_to_nii(
override_subject_name: Callable[[dict, Path], str] | None = None,
chunk=None,
skip_localizer=False,
parent="rawdata",
censor_list=None,
):
if censor_list is None:
censor_list = [
"StudyDate",
"SeriesDate",
"AcquisitionDate",
"ContentDate",
"StudyTime",
"SeriesTime",
"AcquisitionTime",
"ContentTime",
"InstanceCreationDate",
"InstanceCreationTime",
]
if chunk is None:
splitted_dcm_data_l = _classic_get_grouped_dicoms(dcm_data_l)
if len(splitted_dcm_data_l) != 1:
Expand All @@ -282,6 +331,7 @@ def _from_dicom_to_nii(
override_subject_name=override_subject_name,
chunk=i,
skip_localizer=skip_localizer,
parent=parent,
)
outs.append(o)
return outs
Expand All @@ -291,6 +341,9 @@ def _from_dicom_to_nii(
return None

simp_json = get_json_from_dicom(dcm_data_l)
for censor_key in censor_list:
if censor_key in simp_json:
del simp_json[censor_key]
json_file_name, json_bids, nii_path = _get_paths(
simp_json,
dcm_data_l,
Expand All @@ -301,11 +354,13 @@ def _from_dicom_to_nii(
map_series_description_to_file_format,
override_subject_name,
chunk=chunk,
parent=parent,
)
if skip_localizer and json_bids.bids_format == "localizer":
return
logger.print(json_file_name, Log_Type.NEUTRAL, verbose=verbose)
exist = save_json(simp_json, json_file_name)
exist = save_json(simp_json, json_file_name, override=False)
# logger.on_debug(exist, Path(nii_path).exists(), nii_path)
if exist and Path(nii_path).exists():
logger.print("already exists:", json_file_name, ltype=Log_Type.STRANGE, verbose=verbose)
return nii_path
Expand Down Expand Up @@ -520,6 +575,8 @@ def extract_dicom_folder(
n_cpu: int | None = 1,
override_subject_name: Callable[[dict, Path], str] | None = None,
skip_localizer=True,
parent="rawdata",
censor_list: list | None = None,
):
"""
Extract DICOM files from a directory or list of directories, convert them to NIfTI format, and store the output.
Expand All @@ -537,6 +594,8 @@ def extract_dicom_folder(
Returns:
dict: A dictionary with keys representing DICOM series and values as paths to the generated NIfTI files.
"""
if censor_list is None:
censor_list = []
if not validate_slicecount:
convert_dicom.settings.disable_validate_slicecount()
if not validate_orientation:
Expand Down Expand Up @@ -576,6 +635,8 @@ def process_series(key, files, parts):
map_series_description_to_file_format=map_series_description_to_file_format,
override_subject_name=override_subject_name,
skip_localizer=skip_localizer,
parent=parent,
censor_list=censor_list,
)

# Process in parallel or sequentially based on n_cpu
Expand Down Expand Up @@ -606,8 +667,10 @@ def process_series(key, files, parts):


if __name__ == "__main__":
for p in Path("/DATA/NAS/datasets_source/brain/dsa").iterdir():
extract_dicom_folder(p, Path("/DATA/NAS/datasets_source/brain/", "dataset-DSA"), False, False, validate_slice_increment=False)
for p in Path("/media/robert/STORE N GO/DSA_Daten/").iterdir():
extract_dicom_folder(
p, Path("/media/data/robert/datasets", "dataset-Durchleuchtung222"), False, False, validate_slice_increment=False
)

sys.exit()
# s = "/home/robert/Downloads/bein/dataset-oberschenkel/rawdata/sub-1-3-46-670589-11-2889201787-2305829596-303261238-2367429497/mr/sub-1-3-46-670589-11-2889201787-2305829596-303261238-2367429497_sequ-406_mr.nii.gz"
Expand Down
16 changes: 15 additions & 1 deletion TPTBox/core/dicom/dicom_header_to_keys.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@
"t2w?_fse.*": "T2w",
".*t1w?_tse.*": "T1w",
".*t1w?_vibe_tra.*": "vibe",
".*Durchleuchtung.*": "fluroscopy",
".*fluroscopy.*": "fluroscopy",
".*scout": "localizer",
"localizer": "localizer",
".*pilot.*": "localizer",
Expand Down Expand Up @@ -267,21 +269,33 @@ def _get(key, default=None):
if modality == "ct":
mri_format = "ct"
elif modality == "xa": # Angiography
biplane = False
if "BIPLANE A" in image_type or "SINGLE A" in image_type:
keys["acq"] = "A"
biplane = True
elif "BIPLANE B" in image_type or "SINGLE B" in image_type:
keys["acq"] = "B"
biplane = True
derived = "DERIVED" in image_type
series_description = _get("SeriesDescription", " ").lower() # "SeriesDescription": "Durchleuchtung - gespeichert",
monitor = _get("PositionerMotion", " ").lower()
# ftv = _get("FrameTimeVector", None).lower()
monitor = _get("PositionerMotion", " ").lower()
tag = _get("DerivationDescription", " ").lower()
# "ImagerPixelSpacing"
# FrameTimeVector = _get("DerivationDescription", [])
# ftv is not None
if tag == "subtraction":
if "durchleuchtung" in series_description or "fluroscopy" in series_description:
mri_format = "fluroscopy"
elif tag == "subtraction":
mri_format = "DSA" if monitor == "static" and "VOLUME" not in image_type and "RECON" not in image_type else "subtraction"
elif "3DRA_PROP" in image_type:
mri_format = "3DRA"
elif monitor == "dynamic" or "VOLUME" in image_type or "RECON" in image_type or "3DRA_PROP" in image_type:
mri_format = "DSA3D"
elif biplane and derived and "VOLUME" not in image_type and "RECON" not in image_type:
##len(FrameTimeVector) >= 1 and (monitor == "static" and "VOLUME" not in image_type and "RECON" not in image_type)
mri_format = "DSA"
else:
mri_format = "XA"
elif modality == "mr":
Expand Down
139 changes: 139 additions & 0 deletions TPTBox/core/internal/elastic_deform.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
import time

import elasticdeform
import numpy as np
from numpy.typing import NDArray

from TPTBox import NII


def deformed_nii(
    nii_dic: dict[str, NII],
    sigma: float | None = None,
    points=None,
    deform_factor=1.0,
    deform_padding=10,
    normalize=True,
    joint_normalize=False,
) -> dict[str, NII]:
    """
    Deform a dictionary of NII objects using random grid deformation. Requires elasticdeform. 'pip install elasticdeform'

    IMPORTANT: Normalize your image data to 0,1. The .seg property of NII shows if this is a segmentation. (NII is from our TPTBox and is a wrapper for nibabel)

    One shared random elastic deformation is applied to every image in `nii_dic`,
    so segmentations stay aligned with their intensity images. Segmentations
    (``.seg`` is True) use nearest-neighbor interpolation (order 0); intensity
    images use cubic interpolation (order 3).

    Args:
        nii_dic (dict[str, NII]): A dictionary containing NII objects to be deformed.
        sigma (float, optional): The standard deviation of the deformation field. If not provided,
            it will be generated based on the `deform_factor`.
        points (int, optional): The number of control points for the deformation grid. If not provided,
            it will be generated based on the `deform_factor`.
        deform_factor (float, optional): A factor used to determine the deformation parameters if
            `sigma` and `points` are not specified. Larger values result in stronger deformations.
        deform_padding (int, optional): Reflect-padding (in voxels) added before deformation to
            avoid edge artifacts; cropped away again afterwards.
        normalize (bool, optional): Normalize each non-segmentation image individually.
        joint_normalize (bool, optional): Divide all non-segmentation images by their joint
            maximum instead of normalizing each one separately; takes precedence over `normalize`.

    Returns:
        dict[str, NII]: A dictionary where keys correspond to the input dictionary keys, and values
        correspond to the deformed NII objects.

    Example:
        # Deform a dictionary of NII objects using default deformation parameters
        deformed_data = deformed_nii(nii_dic)

        # Deform a dictionary of NII objects with specific deformation parameters
        deformed_data = deformed_nii(nii_dic, sigma=1.0, points=20)
    """
    if sigma is None or points is None:
        sigma, points = get_random_deform_parameter(deform_factor=deform_factor)

    print("deformation parameter sigma = ", round(sigma, 4), "; n_points = ", points)
    t = time.time()
    # Normalize intensity images first; segmentations are passed through unchanged.
    if joint_normalize:
        max_v = max(img.max() for img in nii_dic.values() if not img.seg)
        nii_dic = {k: img if img.seg else img.set_dtype(np.float32) / max_v for k, img in nii_dic.items()}
    elif normalize:
        nii_dic = {k: img if img.seg else img.set_dtype(np.float32).normalize() for k, img in nii_dic.items()}
    else:
        nii_dic = {k: img if img.seg else img.set_dtype(np.float32) for k, img in nii_dic.items()}
    assert sigma is not None
    # BUG FIX: collect the values AFTER the normalization above. Previously the list
    # was built first, so the pre-normalization arrays were deformed and the effect
    # of `normalize`/`joint_normalize` was silently discarded.
    values = list(nii_dic.values())
    p = deform_padding
    out: list[NDArray] = elasticdeform.deform_random_grid(
        [pad(v.get_array(), p=p) for v in values],
        sigma=sigma,  # type: ignore
        points=points,
        order=[0 if v.seg else 3 for v in values],  # type: ignore
    )
    # Crop the padding away again; `-p or None` keeps p == 0 working (arr[0:-0] would be empty).
    crop = slice(p, -p or None)
    out2: dict[str, NII] = {
        k: nii.set_array(arr[crop, crop, crop]) for (k, nii), arr in zip(nii_dic.items(), out, strict=True)
    }
    print("Deformation took", round(time.time() - t, 1), "Seconds")
    return out2


def pad(arr, p=10):
    """Return *arr* reflect-padded by ``p`` elements along every axis."""
    padded = np.pad(arr, pad_width=p, mode="reflect")
    return padded


def get_random_deform_parameter(deform_factor: float = 1):
    """
    Generate random deformation parameters for use in 3D deformation.

    Draws ``sigma`` uniformly from [2.0, 4.5) and then samples ``points`` from a
    sigma-dependent range: the larger the displacement field strength (sigma),
    the fewer grid points are allowed. Finally ``sigma`` is scaled by
    ``deform_factor``.

    Args:
        deform_factor (float, optional): A factor to control the strength of deformation. Default is 1.

    Returns:
        tuple[float, int]: A tuple containing the generated 'sigma' (float) and 'points' (int) parameters.

    Example:
        # Generate random deformation parameters with a deformation factor of 1
        sigma, points = get_random_deform_parameter()

        # Generate random deformation parameters with a deformation factor of 2
        sigma, points = get_random_deform_parameter(deform_factor=2)
    """
    sigma = 2 + np.random.uniform() * 2.5  # uniform in [2.0, 4.5)
    min_points = 3
    # Upper bound for the number of grid points, decreasing as sigma grows.
    # (The original chain also tested sigma < 2 and sigma < 1.7 — both are
    # unreachable since sigma >= 2.0, so those dead branches were removed.)
    max_points = 5
    for threshold, bound in (
        (2.1, 15),
        (2.3, 14),
        (2.5, 13),
        (2.6, 12),
        (2.7, 11),
        (2.8, 10),
        (3.0, 9),
        (3.5, 8),
        (4.0, 7),
        (4.3, 6),
    ):
        if sigma < threshold:
            max_points = bound
            break
    points = np.random.randint(max_points - min_points + 1) + min_points
    # Stronger deformation: only sigma is scaled; the drawn grid resolution stays.
    sigma *= deform_factor
    points = max(round(points), 1)
    return (sigma, points)
Loading
Loading