Skip to content

API Reference

The following sections summarize all individual functions and classes contained in the library.

Model builder pipeline

MRI2FE.Pipelines.FEModelbuilder

Source code in src/MRI2FE/Pipelines/new_model.py
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
class FEModelbuilder:
    """Fluent builder pipeline for constructing an FEModel from MRI data.

    Intended call order: ``mesh()`` first (it populates ``labeled_geom`` and
    ``geom_labels``, which ``map_mre()`` reads), then optionally ``map_mre()``,
    then ``write()`` and/or ``build()``.  Every step returns ``self`` so calls
    can be chained.
    """

    def __init__(
        self, title: str = "", source: str = "", imgout: Optional[str] = None
    ):
        """Initialize the FEModel object to store model data.

        Args:
            title (str, optional): Optional title for the model which will be written to output solver decks. Defaults to "".
            source (str, optional): Optional source folder for model for internal tracking. Defaults to "".
            imgout (str, optional): Directory path used downstream for saving validation images. Defaults to None.
        """
        # The FEModel instance accumulates all mesh/material state built below.
        self.model = FEModel(title=title, source=source, imgout=imgout)

    def mesh(
        self,
        img_path: str,
        img_labels: Optional[List[str]] = None,
        optimize: bool = False,
        **kwargs,
    ):
        """Generate a tetrahedral mesh from labeled MRI data and store in the FEModel object

        Args:
            img_path (str): Path to segmented, labeled MRI image.
            img_labels (List[str], optional): Optional labels providing names for each region of the labeled image. Defaults to None.
            optimize (bool, optional): Whether to perform post-process optimization on the tetrahedral mesh.  Increases quality and run time. Defaults to False.
        """
        # Keep the source image and labels around: map_mre() needs both.
        self.labeled_geom = image_read(img_path)
        self.geom_labels = img_labels

        msh = mesh_from_nifti(filepath=img_path, optimize=optimize, **kwargs)

        self.model.from_meshio(msh, region_names=img_labels)

        return self

    def map_mre(
        self,
        target_label: int = 4,
        MRE_type: Literal[
            "stiffness_damping", "complex_shear"
        ] = "stiffness_damping",
        MRE_geom: Optional[List[Union[str, Any]]] = None,
        MRE_mask: Optional[Union[str, Any]] = None,
        MRE_frequency: Optional[List[float]] = None,
        MRE_to_transform: Optional[List[Tuple[Union[str, Any]]]] = None,
        n_segs: int = 5,
        **kwargs,
    ):
        """Calculate material model coefficients and map MRE material assignments onto an ROI on the mesh.

        Must be called after ``mesh()``; reads ``self.labeled_geom`` and
        ``self.geom_labels`` that ``mesh()`` stores.

        Args:
            target_label (int, optional): Target integer label on the labeled image to map MRE material properties to. Defaults to 4.
            MRE_type ("stiffness_damping" or "complex_shear", optional): Specify whether MRE files are provided as shear stiffness and damping ratio or as storage and loss moduli. Defaults to "stiffness_damping".
            MRE_geom (List[str  |  Any], optional): List of images or paths to images for MRE geometries at each frequency. Defaults to None.
            MRE_mask (str | Any, optional): ROI mask for MRE geometry. Defaults to None.
            MRE_frequency (List[float], optional): List of frequencies for each MRE geometry image. Defaults to None.
            MRE_to_transform (List[Tuple[str  |  Any]], optional): List of tuples of strings or MRE images.  Each tuple represents either shear/damping or storage/loss moduli at a frequency given in MRE_frequency. Defaults to None.
            n_segs (int, optional): Number of segments to discretize the MRE material properties into. Defaults to 5.

        """
        _, transformed = coregister_MRE_images(
            segmented_geom=self.labeled_geom,
            target_label=target_label,
            MRE_geom=MRE_geom,
            MRE_mask=MRE_mask,
            MRE_to_transform=MRE_to_transform,
            imgout=cast(str, self.model.metadata["imgout"]),
            **kwargs,
        )
        # handle edge case if only one MRE frequency is used
        if isinstance(transformed, tuple):
            transformed = [transformed]

        self.transformed_mre = transformed

        labels, region_avgs = segment_MRE_regions(
            img_list=transformed,
            n_segs=n_segs,
            imgout=cast(str, self.model.metadata["imgout"]),
            imgout_geom=self.labeled_geom,
        )

        # Fit a Prony-series material model per k-means region.  Keys "1"/"2"
        # hold the per-region averages of the two MRE images in each tuple.
        regions_props = []
        for i in range(len(region_avgs)):
            if MRE_type == "stiffness_damping":
                regions_props.append(
                    calculate_prony(
                        mu=region_avgs["1"][i],
                        xi=region_avgs["2"][i],
                        w=MRE_frequency,
                    )
                )

            elif MRE_type == "complex_shear":
                regions_props.append(
                    calculate_prony(
                        gp=region_avgs["1"][i],
                        gpp=region_avgs["2"][i],
                        w=MRE_frequency,
                    )
                )

        # NOTE(review): region_prefix assumes geom_labels is ordered so that
        # label N maps to geom_labels[N - 1] — confirm against mesh() callers.
        if self.geom_labels is not None:
            self.model = map_MRE_to_mesh(
                self.model,
                label_img=labels,
                region_properties=regions_props,
                target_region_id=target_label,
                region_prefix=self.geom_labels[target_label - 1],
            )
        else:
            self.model = map_MRE_to_mesh(
                self.model,
                label_img=labels,
                region_properties=regions_props,
                target_region_id=target_label,
            )

        return self

    def write(self, fpath: str, type: Literal["lsdyna"] = "lsdyna"):
        """Write model to output solver deck

        Args:
            fpath (str): File path to save output to.
            type ("lsdyna", optional): Output type to be saved.  Currently, only LS-DYNA is supported. Defaults to "lsdyna".

        """
        # Unsupported types are silently ignored (no write occurs).
        if type == "lsdyna":
            self.model.write_lsdyna(fpath)

        return self

    def build(self):
        """Return generated FEModel"""
        return self.model

__init__(title='', source='', imgout=None)

Initialize the FEModel object to store model data.

Parameters:

Name Type Description Default
title str

Optional title for the model which will be written to output solver decks. Defaults to "".

''
source str

Optional source folder for model for internal tracking. Defaults to "".

''
Source code in src/MRI2FE/Pipelines/new_model.py
13
14
15
16
17
18
19
20
21
22
def __init__(
    self, title: str = "", source: str = "", imgout: Optional[str] = None
):
    """Initialize the FEModel object to store model data.

    Args:
        title (str, optional): Optional title for the model which will be written to output solver decks. Defaults to "".
        source (str, optional): Optional source folder for model for internal tracking. Defaults to "".
    """
    self.model = FEModel(title=title, source=source, imgout=imgout)

build()

Return generated FEModel

Source code in src/MRI2FE/Pipelines/new_model.py
145
146
147
def build(self):
    """Return generated FEModel"""
    return self.model

map_mre(target_label=4, MRE_type='stiffness_damping', MRE_geom=None, MRE_mask=None, MRE_frequency=None, MRE_to_transform=None, n_segs=5, **kwargs)

Calculate material model coefficients and map MRE material assignments onto an ROI on the mesh.

Parameters:

Name Type Description Default
target_label int

Target integer label on the labeled image to map MRE material properties to. Defaults to 4.

4
MRE_type stiffness_damping or complex_shear

Specify whether MRE files are provided as shear stiffness and damping ratio or as storage and loss moduli. Defaults to "stiffness_damping".

'stiffness_damping'
MRE_geom List[str | Any]

List of images or paths to images for MRE geometries at each frequency. Defaults to None.

None
MRE_mask str | Any

ROI mask for MRE geometry. Defaults to None.

None
MRE_frequency List[float]

List of frequencies for each MRE geometry image. Defaults to None.

None
MRE_to_transform List[Tuple[str | Any]]

List of tuples of strings or MRE images. Each tuple represents either shear/damping or storage/loss moduli at a frequency given in MRE_frequency. Defaults to None.

None
n_segs int

Number of segments to discretize the MRE material properties into. Defaults to 5.

5
Source code in src/MRI2FE/Pipelines/new_model.py
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
def map_mre(
    self,
    target_label: int = 4,
    MRE_type: Literal[
        "stiffness_damping", "complex_shear"
    ] = "stiffness_damping",
    MRE_geom: Optional[List[Union[str, Any]]] = None,
    MRE_mask: Optional[Union[str, Any]] = None,
    MRE_frequency: Optional[List[float]] = None,
    MRE_to_transform: Optional[List[Tuple[Union[str, Any]]]] = None,
    n_segs: int = 5,
    **kwargs,
):
    """Calculate material model coefficients and map MRE material assignments onto an ROI on the mesh.

    Args:
        target_label (int, optional): Target integer label on the labeled image to map MRE material properties to. Defaults to 4.
        MRE_type ("stiffness_damping" or "complex_shear", optional): Specify whether MRE files are provided as shear stiffness and damping ratio or as storage and loss moduli. Defaults to "stiffness_damping".
        MRE_geom (List[str  |  Any], optional): List of images or paths to images for MRE geometries at each frequency. Defaults to None.
        MRE_mask (str | Any, optional): ROI mask for MRE geometry. Defaults to None.
        MRE_frequency (List[float], optional): List of frequencies for each MRE geometry image. Defaults to None.
        MRE_to_transform (List[Tuple[str  |  Any]], optional): List of tuples of strings or MRE images.  Each tuple represents either shear/damping or storage/loss moduli at a frequency given in MRE_frequency. Defaults to None.
        n_segs (int, optional): Number of segments to discretize the MRE material properties into. Defaults to 5.

    """
    _, transformed = coregister_MRE_images(
        segmented_geom=self.labeled_geom,
        target_label=target_label,
        MRE_geom=MRE_geom,
        MRE_mask=MRE_mask,
        MRE_to_transform=MRE_to_transform,
        imgout=cast(str, self.model.metadata["imgout"]),
        **kwargs,
    )
    # handle edge case if only one MRE frequency is used
    if isinstance(transformed, tuple):
        transformed = [transformed]

    self.transformed_mre = transformed

    labels, region_avgs = segment_MRE_regions(
        img_list=transformed,
        n_segs=n_segs,
        imgout=cast(str, self.model.metadata["imgout"]),
        imgout_geom=self.labeled_geom,
    )

    regions_props = []
    for i in range(len(region_avgs)):
        if MRE_type == "stiffness_damping":
            regions_props.append(
                calculate_prony(
                    mu=region_avgs["1"][i],
                    xi=region_avgs["2"][i],
                    w=MRE_frequency,
                )
            )

        elif MRE_type == "complex_shear":
            regions_props.append(
                calculate_prony(
                    gp=region_avgs["1"][i],
                    gpp=region_avgs["2"][i],
                    w=MRE_frequency,
                )
            )

    if self.geom_labels is not None:
        self.model = map_MRE_to_mesh(
            self.model,
            label_img=labels,
            region_properties=regions_props,
            target_region_id=target_label,
            region_prefix=self.geom_labels[target_label - 1],
        )
    else:
        self.model = map_MRE_to_mesh(
            self.model,
            label_img=labels,
            region_properties=regions_props,
            target_region_id=target_label,
        )

    return self

mesh(img_path, img_labels=None, optimize=False, **kwargs)

Generate a tetrahedral mesh from labeled MRI data and store in the FEModel object

Parameters:

Name Type Description Default
img_path str

Path to segmented, labeled MRI image.

required
img_labels List[str]

Optional labels providing names for each region of the labeled image. Defaults to None.

None
optimize bool

Whether to perform post-process optimization on the tetrahedral mesh. Increases quality and run time. Defaults to False.

False
Source code in src/MRI2FE/Pipelines/new_model.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
def mesh(
    self,
    img_path: str,
    img_labels: Optional[List[str]] = None,
    optimize: bool = False,
    **kwargs,
):
    """Generate a tetrahedral mesh from labeled MRI data and store in the FEModel object

    Args:
        img_path (str): Path to segmented, labeled MRI image.
        img_labels (List[str], optional): Optional labels providing names for each region of the labeled image. Defaults to None.
        optimize (bool, optional): Whether to perform post-process optimization on the tetrahedral mesh.  Increases quality and run time. Defaults to False.
    """
    self.labeled_geom = image_read(img_path)
    self.geom_labels = img_labels

    msh = mesh_from_nifti(filepath=img_path, optimize=optimize, **kwargs)

    self.model.from_meshio(msh, region_names=img_labels)

    return self

write(fpath, type='lsdyna')

Write model to output solver deck

Parameters:

Name Type Description Default
fpath str

File path to save output to.

required
type lsdyna

Output type to be saved. Currently, only LS-DYNA is supported. Defaults to "lsdyna".

'lsdyna'
Source code in src/MRI2FE/Pipelines/new_model.py
132
133
134
135
136
137
138
139
140
141
142
143
def write(self, fpath: str, type: Literal["lsdyna"] = "lsdyna"):
    """Write model to output solver deck

    Args:
        fpath (str): File path to save output to.
        type ("lsdyna", optional): Output type to be saved.  Currently, only LS-DYNA is supported. Defaults to "lsdyna".

    """
    if type == "lsdyna":
        self.model.write_lsdyna(fpath)

    return self

Meshing

MRI2FE.mesh_from_nifti(filepath, optimize=False, facetAngle=30.0, facetSize=1.0, facetDistance=4.0, cellRadiusEdgeRatio=3.0, cellSize=1.0)

Source code in src/MRI2FE/generate_mesh.py
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
def mesh_from_nifti(
    filepath: str,
    optimize: bool = False,
    facetAngle: float = 30.0,
    facetSize: float = 1.0,
    facetDistance: float = 4.0,
    cellRadiusEdgeRatio: float = 3.0,
    cellSize: float = 1.0,
) -> meshio.Mesh:
    """Generate a tetrahedral mesh from a labeled NIfTI image.

    The image is converted to INR format, meshed via ``mesh_wrapper``, and
    the resulting mesh points are translated back to the image origin so the
    mesh sits in the scanner coordinate frame.

    Args:
        filepath (str): Path to the labeled NIfTI image on disk.
        optimize (bool, optional): Whether to run post-processing mesh
            optimization.  Increases quality and run time. Defaults to False.
        facetAngle (float, optional): Facet angle criterion passed to the
            mesher. Defaults to 30.0.
        facetSize (float, optional): Facet size criterion. Defaults to 1.0.
        facetDistance (float, optional): Facet distance criterion. Defaults to 4.0.
        cellRadiusEdgeRatio (float, optional): Cell radius/edge ratio
            criterion. Defaults to 3.0.
        cellSize (float, optional): Cell size criterion. Defaults to 1.0.

    Raises:
        ValueError: The input file does not exist, the mesher produced no
            output file, or the generated mesh could not be read back.

    Returns:
        meshio.Mesh: Tetrahedral mesh with points offset by the image origin.
    """
    if not os.path.exists(filepath):
        raise ValueError(f"input filepath {filepath} does not exist")

    image = image_read(filepath)

    # Remember the image origin: the mesher works in index space and the
    # points must be shifted back afterwards.
    origin = image.origin

    transfer_path = nifti_to_inr(image=image)

    mesh_path = mesh_wrapper(
        transfer_path,
        optimize,
        facetAngle,
        facetSize,
        facetDistance,
        cellRadiusEdgeRatio,
        cellSize,
    )

    if not os.path.exists(mesh_path):
        raise ValueError(f"Mesh wrapper did not create mesh at {mesh_path}")

    # Keep the try body minimal and chain the cause so the underlying
    # meshio failure is not lost.
    try:
        mesh: meshio.Mesh = meshio.read(mesh_path)
    except ValueError as err:
        raise ValueError(
            "Error loading mesh from file, mesh may be too large..."
        ) from err

    # Translate mesh back into the scanner coordinate frame.
    mesh.points = mesh.points + np.array(origin)

    return mesh

MRE Coregistration

MRI2FE.MRE.coregister_MRE_images(segmented_geom, target_label=4, segmented_mask=None, MRE_geom=None, MRE_mask=None, MRE_to_transform=None, imgout=None, type_of_transform='Affine')

Coregister MRE geometry image to segmented geometry image, and transform corresponding MRE images.

Parameters:

Name Type Description Default
segmented_geom Union[str, ANTsImage]

Segmented geometry image used for mesh creation

required
target_label int

Label for ROI on geometry image. Used to create geometry mask if one is not provided. Defaults to 4.

4
segmented_mask Union[str, ANTsImage]

Binary mask for ROI on geometry image. Defaults to None.

None
MRE_geom List[Union[str, ANTsImage]]

List of geometry images associated with MRE at different frequencies. Defaults to None.

None
MRE_mask Union[str, ANTsImage]

Binary mask associated with ROI in MRE images. Defaults to None.

None
MRE_to_transform List[Tuple[Union[str, ANTsImage]]]

List of tuples, each tuple containing MRE images associated with one of the MRE_geom images provided. Defaults to None.

None
imgout str

Filepath to save validation images of the transformations applied. Defaults to None.

None
type_of_transform str

Type of transform, see Antspy registration documentation for guidance. Defaults to "Affine".

'Affine'

Raises:

Type Description
ValueError

No MRE/segmented geometry image provided

FileNotFoundError

MRE/segmented geometry files not found

TypeError

Image files are not a filepath string or AntsImage

ValueError

Transform could not be resolved

ValueError

MRE_geom and MRE_to_transform are different lengths

ValueError

imgout path could not be found

Returns:

Name Type Description
Transformations List

list of transformations

Transformed_images List

list of transformed image tuples

Source code in src/MRI2FE/MRE/MRE_coregistration.py
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
def coregister_MRE_images(
    segmented_geom: Union[str, ANTsImage],
    target_label: int = 4,
    segmented_mask: Optional[Union[str, ANTsImage]] = None,
    MRE_geom: Optional[List[Union[str, ANTsImage]]] = None,
    MRE_mask: Optional[Union[str, ANTsImage]] = None,
    MRE_to_transform: Optional[List[Tuple[Union[str, ANTsImage]]]] = None,
    imgout: Optional[str] = None,
    type_of_transform: str = "Affine",
):
    """Coregister MRE geometry image to segmented geometry image, and transform corresponding MRE images.

    Args:
        segmented_geom (Union[str, ANTsImage]): Segmented geometry image used for mesh creation
        target_label (int, optional): Label for ROI on geometry image. Used to create geometry mask if one is not provided. Defaults to 4.
        segmented_mask (Union[str,ANTsImage], optional): Binary mask for ROI on geometry image. Defaults to None.
        MRE_geom (List[Union[str, ANTsImage]], optional): List of geometry images associated with MRE at different frequencies. Defaults to None.
        MRE_mask (Union[str, ANTsImage], optional): Binary mask associated with ROI in MRE images. Defaults to None.
        MRE_to_transform (List[Tuple[Union[str, ANTsImage]]], optional): List of tuples, each tuple containing MRE images associated with one of the MRE_geom images provided. Defaults to None.
        imgout (str, optional): Filepath to save validation images of the transformations applied. Defaults to None.
        type_of_transform (str, optional): Type of transform, see Antspy registration documentation for guidance. Defaults to "Affine".

    Raises:
        ValueError: No MRE/segmented geometry image provided
        FileNotFoundError: MRE/segmented geometry files not found
        TypeError: Image files are not a filepath string or AntsImage
        ValueError: Transform could not be resolved
        ValueError: MRE_geom and MRE_to_transform are different lengths
        ValueError: imgout path could not be found

    Returns:
        Transformations (List): list of transformations
        Transformed_images (List): list of transformed image tuples
    """
    if segmented_geom is None:
        raise ValueError("Segmented geometry image is required")

    if MRE_geom is None:
        raise ValueError("MRE geometry image is required")

    MRE_geom = _entry_to_list(MRE_geom)
    MRE_to_transform = _entry_to_list(MRE_to_transform)

    # Load segmented geometry
    if isinstance(segmented_geom, str):
        if not os.path.exists(segmented_geom):
            raise FileNotFoundError(
                f"Segmented geometry file not found: {segmented_geom}"
            )
        segmented_geom = image_read(segmented_geom)
    elif not isinstance(segmented_geom, ANTsImage):
        raise TypeError(
            "segmented_geom must be either a filepath string or ANTsImage object"
        )

    # Load segmented mask, or threshold the geometry around target_label to
    # synthesize one.
    if segmented_mask is None:
        segmented_mask = threshold_image(
            segmented_geom,
            low_thresh=target_label - 0.01,
            high_thresh=target_label + 1.01,
        )
    elif isinstance(segmented_mask, str):
        if not os.path.exists(segmented_mask):
            raise FileNotFoundError(
                f"Segmented mask file not found: {segmented_mask}"
            )
        # BUG FIX: previously re-read segmented_geom here, leaving the mask
        # as an unloaded path string.
        segmented_mask = image_read(segmented_mask)
    elif not isinstance(segmented_mask, ANTsImage):
        # BUG FIX: previously type-checked segmented_geom instead of the mask.
        raise TypeError(
            "segmented_mask must be either a filepath string or ANTsImage object"
        )

    # load MRE geometries
    MRE_geom_imgs = [_ensure_image(img) for img in MRE_geom]

    # load MRE mask (optional)
    if MRE_mask is not None:
        if isinstance(MRE_mask, str):
            if not os.path.exists(MRE_mask):
                raise FileNotFoundError(
                    f"MRE mask file not found: {MRE_mask}"
                )
            MRE_mask = image_read(MRE_mask)
        elif not isinstance(MRE_mask, ANTsImage):
            # BUG FIX: previously type-checked segmented_geom instead of
            # MRE_mask, so a bad mask type slipped through.
            raise TypeError(
                "MRE_mask must be either a filepath string or ANTsImage object"
            )

    # load images to transform
    MRE_transform_imgs = [_ensure_tuple(imgs) for imgs in MRE_to_transform]

    if len(MRE_transform_imgs) != len(MRE_geom_imgs):
        raise ValueError(
            f"{len(MRE_geom_imgs)} geometry images were provided but {len(MRE_transform_imgs)} transform tuples were provided"
        )

    # create each transform and image
    transformations = []
    transformed_images = []
    for idx, (geom, img_tuple) in enumerate(
        zip(MRE_geom_imgs, MRE_transform_imgs)
    ):
        # resample geometry to MRE image resolution
        geom_resample = resample_image(
            segmented_geom, geom.shape, use_voxels=True
        )
        mask_resample = resample_image(
            segmented_mask, geom.shape, use_voxels=True
        )

        # calculate transform; only pass moving_mask when one was provided
        reg_kwargs = dict(
            fixed=geom_resample,
            moving=geom,
            type_of_transform=type_of_transform,
            mask=mask_resample,
        )
        if MRE_mask is not None:
            reg_kwargs["moving_mask"] = MRE_mask
        try:
            tx = registration(**reg_kwargs)
        except ValueError as err:
            # BUG FIX: previously only printed and fell through to use an
            # unbound `tx`, raising a confusing NameError.  Re-raise with
            # the failing image index and the original cause instead.
            raise ValueError(
                f"transformation failed on image {idx}"
            ) from err

        transformations.append(tx["fwdtransforms"])

        # apply transforms to every image in this frequency's tuple
        transformed_tuple = tuple(
            apply_transforms(
                fixed=geom_resample,
                moving=img,
                transformlist=tx["fwdtransforms"],
            )
            for img in img_tuple
        )
        transformed_images.append(transformed_tuple)

        if imgout is not None:
            if not os.path.exists(imgout):
                raise ValueError("imgout directory does not exist")
            base = os.path.join(imgout, f"MRE{idx}_coreg.png")
            plot(
                segmented_geom,
                overlay=tx["warpedmovout"],
                overlay_cmap="Dark2",
                overlay_alpha=0.8,
                filename=base,
                axis=0,
            )

    # return single result or list of results
    if len(transformed_images) == 0:
        raise ValueError("No results generated from MRE coregistration")
    elif len(transformed_images) == 1:
        return transformations[0], transformed_images[0]
    else:
        return transformations, transformed_images

MRI2FE.MRE.segment_MRE_regions(img_list, n_segs=5, imgout=None, imgout_geom=None)

Kmeans segmentation of MRE images

Parameters:

Name Type Description Default
img_list List[Tuple[ANTsImage]]

List of tuples of ANTsImage, each tuple representing the two images available for MRE at a given frequency.

required
n_segs int

Number of segments to generate. Defaults to 5.

5
imgout str

optional directory path to save validation images. Defaults to None.

None

Returns:

Name Type Description
Labels ANTsImage

Image containing integer labels for each region of the MRE images.

km_ants dict

Dictionary containing average properties for each region. Keys are "1" and "2" for the two input images for each tuple, each key contains a list of length n_tuples which has the average properties for that cluster

Source code in src/MRI2FE/MRE/MRE_coregistration.py
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
def segment_MRE_regions(
    img_list: List[Tuple[ANTsImage, ANTsImage]],
    n_segs: int = 5,
    imgout: Optional[str] = None,
    imgout_geom: Optional[Union[str, ANTsImage]] = None,
):
    """Kmeans segmentation of MRE images

    Args:
        img_list (List[Tuple[ANTsImage]]): List of tuples of ANTsImage, each tuple representing the two images available for MRE at a given frequency.
        n_segs (int, optional): Number of segments to generate. Defaults to 5.
        imgout (str, optional): optional directory path to save validation images.  Defaults to None.
        imgout_geom (str | ANTsImage, optional): background image (or path to one) used for the validation overlay; when None, each MRE image is plotted instead. Defaults to None.

    Raises:
        ValueError: img_list entries are not ANTsImage, the assembled sample
            array has unexpected dimensions, imgout does not exist, or
            imgout_geom has an unsupported type.

    Returns:
        Labels (ANTsImage): Image containing integer labels for each region of the MRE images.
        km_ants (dict): Dictionary containing average properties for each region.  Keys are "1" and "2" for the two input images for each tuple, each key contains a list of length n_tuples which has the average properties for that cluster
    """

    # check image list contents:
    for tup in img_list:
        for entry in tup:
            if not isinstance(entry, ANTsImage):
                raise ValueError(
                    "All entries in img_list must be tuples of AntsImage"
                )

    # calculate array dimensions
    ants_size = img_list[0][0].numpy().size
    ants_shape = img_list[0][0].numpy().shape
    n_img = 2
    n_features = len(img_list)

    # build kmeans array: one row per voxel, one column per MRE image
    samples_list = []
    for tup in img_list:
        samples_list.append(tup[0].numpy().flatten())
        samples_list.append(tup[1].numpy().flatten())

    samples = np.array(
        samples_list
    ).T  # rows = samples (voxels), columns = features (MRE values)

    if not samples.shape == (ants_size, n_features * n_img):
        raise ValueError(
            "internal error: sample dimensions do not match post k-means array assembly"
        )

    # NOTE(review): n_segs + 1 clusters are fit — presumably the extra
    # cluster absorbs background voxels; confirm against downstream mapping.
    kmeans = KMeans(n_clusters=n_segs + 1).fit(samples)

    # create label image
    km_label_array = kmeans.labels_.reshape(ants_shape)

    km_label_ants = new_image_like(img_list[0][0], km_label_array)

    # create region average properties: even columns came from tup[0]
    # ("1"), odd columns from tup[1] ("2")
    km_avgs: dict = {"1": [], "2": []}
    for row in kmeans.cluster_centers_:
        label_1 = row[::2].tolist()
        label_2 = row[1::2].tolist()

        km_avgs["1"].append(label_1)
        km_avgs["2"].append(label_2)

    if imgout is not None:
        if not os.path.exists(imgout):
            raise ValueError("imgout directory does not exist")
        elif imgout_geom is None:
            for idx, img in enumerate(img_list):
                base = os.path.join(imgout, f"MRE{idx}_segmentation.png")
                plot(
                    img[0],
                    overlay=km_label_ants,
                    overlay_cmap="tab10",
                    overlay_alpha=0.5,
                    filename=base,
                    axis=0,
                )
        elif isinstance(imgout_geom, str):
            img = image_read(imgout_geom)
            base = os.path.join(imgout, "MRE_segmentation.png")
            plot(
                img,
                overlay=km_label_ants,
                overlay_cmap="tab10",
                overlay_alpha=0.5,
                filename=base,
                axis=0,
            )
        elif isinstance(imgout_geom, ANTsImage):
            base = os.path.join(imgout, "MRE_segmentation.png")
            plot(
                imgout_geom,
                overlay=km_label_ants,
                overlay_cmap="tab10",
                overlay_alpha=0.5,
                filename=base,
                axis=0,
            )
        else:
            raise ValueError("imgout_geom must be a string or ANTsImage")

    return km_label_ants, km_avgs

MRI2FE.MRE.calculate_prony

calculate_prony(gp=None, gpp=None, mu=None, xi=None, w=None, tol=0.001)

Calculate the 1st order prony series equivalent to the complex shear modulus representation of brain tissue viscoelasticity

Parameters:

Name Type Description Default
gp array - like

storage moduli at different frequencies. If gp is provided, gpp must also be provided.

None
gpp array - like

loss moduli at different frequencies. If gpp is provided, gp must also be provided.

None
mu array - like

Shear stiffness at different frequencies. If mu is provided, xi must also be provided.

None
xi array - like

Damping ratio at different frequencies. If xi, is provided, mu must also be provided.

None
w array - like

MRE frequency for each gp/gpp or mu/xi value, must follow the same order.

None
tol float

Back calculation tolerance check. Defaults to 1.00e-3.

0.001

Raises:

Type Description
ValueError

gp/gpp or mu/xi not provided

ValueError

number of inputs on each array do not match

ValueError

Mu or Xi back-calculation out of range

Returns:

Name Type Description
Ginf float

long-time shear modulus

G1 float

short-time shear modulus

tau float

short-time time constant

gp float

Storage modulus

gpp float

Loss modulus

Source code in src/MRI2FE/MRE/calculate_prony.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
def calculate_prony(
    gp: Optional[ArrayLike] = None,
    gpp: Optional[ArrayLike] = None,
    mu: Optional[ArrayLike] = None,
    xi: Optional[ArrayLike] = None,
    w: Optional[ArrayLike] = None,
    tol=1.00e-3,
):
    """Calculate the 1st order prony series equivalent to the complex shear modulus representation of brain tissue viscoelasticity

    Exactly one of the two input sets must be complete: either (gp, gpp, w)
    or (mu, xi, w).  When (mu, xi) is given, the complex modulus is derived
    from stiffness and damping ratio first, then verified by back-calculation.

    Args:
        gp (array-like): storage moduli at different frequencies. If gp is provided, gpp must also be provided.
        gpp (array-like): loss moduli at different frequencies.  If gpp is provided, gp must also be provided.
        mu (array-like): Shear stiffness at different frequencies.  If mu is provided, xi must also be provided.
        xi (array-like): Damping ratio at different frequencies.  If xi is provided, mu must also be provided.
        w (array-like): MRE frequency for each gp/gpp or mu/xi value, must follow the same order.
        tol (float, optional): Back calculation tolerance check. Defaults to 1.00e-3.

    Raises:
        ValueError: gp/gpp or mu/xi not provided
        ValueError: number of inputs on each array do not match
        ValueError: Mu or Xi back-calculation out of range

    Returns:
        Ginf (float): long-time shear modulus
        G1 (float): short-time shear modulus
        tau (float): short-time time constant
    """

    # check which parameter set is completely specified
    first_set_valid = all(param is not None for param in [gp, gpp, w])
    second_set_valid = all(param is not None for param in [mu, xi, w])

    # Raise error if neither parameter set is complete
    if not (first_set_valid or second_set_valid):
        raise ValueError(
            "You must provide either (gp, gpp, w) or (mu, xi, w) as inputs"
        )

    # check if all are array like
    first_set_type = all(_is_array(o) for o in [gp, gpp, w])
    second_set_type = all(_is_array(o) for o in [mu, xi, w])

    if not (first_set_type or second_set_type):
        raise ValueError("Unmatched number of values for input parameters")

    w = np.asarray(w)

    # calculate complex modulus from stiffness and damping ratio (array)
    if second_set_valid:
        # NOTE: conversion is guarded by the same condition as this branch
        # (previously it tested second_set_type, so mu/xi could reach the
        # math below unconverted).
        mu = np.abs(np.asarray(mu))
        xi = np.abs(np.asarray(xi))

        a = np.sqrt(1.0 + 4.0 * np.square(xi))

        gp_array: np.ndarray = ((1.0 + a) * mu) / (2.0 * np.square(a))
        gpp_array: np.ndarray = 2.0 * xi * gp_array
        gmag_array: np.ndarray = np.sqrt(
            np.square(gp_array) + np.square(gpp_array)
        )

        # back-calculate mu and xi to check calculation, raise error if they don't match
        mu_bc = 2.0 * np.square(gmag_array) / (gp_array + gmag_array)
        xi_bc = gpp_array / (2.0 * gp_array)

        if np.sum((mu - mu_bc) ** 2) >= tol:
            raise ValueError(
                f"Error in mu back-calculation: mu = {mu}, mu_bc = {mu_bc}."
            )
        if np.sum((xi - xi_bc) ** 2) >= tol:
            raise ValueError(
                f"Error in xi back-calculation, xi = {xi}, xi_bc = {xi_bc}.."
            )

    else:
        # first set complete: use the measured moduli directly
        gp_array = np.asarray(gp)
        gpp_array = np.asarray(gpp)

    # calculate the prony series constants
    Ginf, G1, tau = prony_series(gp_array, gpp_array, w)

    return Ginf, G1, tau

prony_series(gp, gpp, w)

Calculate the 1-term prony series constants for an equivalent complex shear modulus using least squares optimization

Parameters:

Name Type Description Default
gp float

Storage modulus

required
gpp float

Loss modulus

required
w float

Modulus frequency

required

Returns:

Name Type Description
Ginf float

Long-time shear modulus

G1 float

short-time shear modulus

tau float

time constant

Source code in src/MRI2FE/MRE/calculate_prony.py
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
def prony_series(
    gp: Union[float, np.ndarray],
    gpp: Union[float, np.ndarray],
    w: Union[float, np.ndarray],
):
    """
    Calculate the 1-term prony series constants for an equivalent complex shear modulus using least squares optimization

    Fits G(w) = g1 + g2*(w*tau)^2/(1+(w*tau)^2) + i*g2*(w*tau)/(1+(w*tau)^2)
    to the measured storage/loss moduli via SLSQP, constrained so the fitted
    loss factor matches the measured mean loss factor.

    Arguments:
        gp (float): Storage modulus
        gpp (float): Loss modulus
        w (float): Modulus frequency

    Returns:
        Ginf (float): Long-time shear modulus
        G1 (float): short-time shear modulus
        tau (float): time constant
    """

    # Ensure all inputs are ndarrays for consistent handling
    gp = np.asarray(gp)
    gpp = np.asarray(gpp)
    w = np.asarray(w)

    # Check if we have scalar or vector inputs
    is_scalar_input = gp.ndim == 0 and gpp.ndim == 0 and w.ndim == 0

    # Initial guess for optimization variables [g1, g2, tau];
    # tau is seeded at the (mean) measurement period.
    if is_scalar_input:
        x0 = np.array([1.0, 1.0, 1.0 / w])
    else:
        x0 = np.array([1.0, 1.0, 1.0 / np.mean(w)])

    # Bounds for variables (all non-negative)
    bounds = [(0, None), (0, None), (0, None)]

    # Objective function to minimize
    def objective(x):
        g1, g2, tau = x
        # Calculate the expressions for gp and gpp
        gpex = g1 + (g2 * w**2 * tau**2) / (1 + w**2 * tau**2)
        gppex = (g2 * w * tau) / (1 + w**2 * tau**2)

        # Return the sum of squared errors
        return np.sum((gp - gpex) ** 2 + (gpp - gppex) ** 2)

    # Constraint: gppex/gpex = gpp/gp
    def constraint(x):
        g1, g2, tau = x
        gpex = g1 + (g2 * w**2 * tau**2) / (1 + w**2 * tau**2)
        gppex = (g2 * w * tau) / (1 + w**2 * tau**2)

        # Guard the division: fall back when ANY fitted storage modulus is
        # zero.  (Bug fix: the original `gpex.any() == 0.0` compared the
        # boolean result of .any() to 0.0, which is True only when ALL
        # entries are zero -- the exact opposite of a divide-by-zero guard.)
        if np.any(gpex == 0.0):
            return np.mean(gppex) - np.mean(gpp / gp)
        else:
            return np.mean(gppex / gpex) - np.mean(gpp / gp)

    # Define the constraint as a dictionary
    cons = {"type": "eq", "fun": constraint}

    # Solve the optimization problem
    result = minimize(
        objective,
        x0,
        method="SLSQP",
        bounds=bounds,
        constraints=cons,
        options={"ftol": 1e-10},
    )

    # Extract the optimized values
    g1, g2, tau = result.x

    return g1, g2, tau

MRI2FE.MRE.map_MRE_to_mesh(mdl, label_img, region_properties, target_region_id=4, label_background_id=0, region_prefix=None, imgout=None)

Map MRE properties onto FE mesh and store associated material properties in the model material array

Parameters:

Name Type Description Default
mdl FEModel

FE model to map MRE regions onto

required
label_img ANTsImage

MRI image with integer labels for each MRE region

required
region_properties List

List of prony series properties for each MRE region

required
target_region_id int

Target PID in the FE model to replace with segmented MRE IDs. Defaults to 4.

4
label_background_id int

Integer label in the label_img associated with the background. Defaults to 0.

0
region_prefix str

name prefix for the new part assignment names

None

Raises:

Type Description
TypeError

Input not the right type

ValueError

negative target region

Returns:

Name Type Description
FEModel FEModel

Model with updated PIDs for the target region and material properties added

Source code in src/MRI2FE/MRE/MRE_mapping.py
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
def map_MRE_to_mesh(
    mdl: FEModel,
    label_img: ANTsImage,
    region_properties: List,
    target_region_id: int = 4,
    label_background_id: int = 0,
    region_prefix: Optional[str] = None,
    imgout: Optional[str] = None,
) -> FEModel:
    """Map MRE properties onto FE mesh and store associated material properties in the model material array

    Args:
        mdl (FEModel): FE model to map MRE regions onto
        label_img (ANTsImage): MRI image with integer labels for each MRE region
        region_properties (List): List of prony series properties for each MRE region
        target_region_id (int, optional): Target PID in the FE model to replace with segmented MRE IDs. Defaults to 4.
        label_background_id (int, optional): Integer label in the label_img associated with the background. Defaults to 0.
        region_prefix (str, optional): name prefix for the new part assignment names
        imgout (str, optional): kept for interface compatibility; not used here.

    Raises:
        TypeError: Input not the right type
        ValueError: negative target region

    Returns:
        FEModel: Model with updated PIDs for the target region and material properties added
    """
    if not isinstance(mdl, FEModel):
        raise TypeError("mdl must be a FEModel object")

    if not isinstance(label_img, ANTsImage):
        raise TypeError("label_img must be an ANTsImage object")

    if not isinstance(target_region_id, int):
        raise TypeError("target_region_id must be an integer")
    if target_region_id < 0:
        raise ValueError("target_region_id must be non-negative")

    # centroids are required for the nearest-voxel lookup below
    if mdl.centroid_table is None:
        mdl.update_centroids()
        assert mdl.centroid_table is not None

    # long-format rows of (x, y, z, label) for every voxel
    label_img_long = spatial_map(label_img)

    # find max ID in existing model to use as offset
    max_id = np.max(mdl.element_table[:, 1])

    # create filtered space map and centroids for ROI only
    label_img_long_nobackground = label_img_long[
        label_img_long[:, 3] != label_background_id
    ]

    # find elements and centroids within target label region
    ect_region_mask = mdl.element_table[:, 1] == target_region_id

    elcentroids_ROI = mdl.centroid_table[ect_region_mask, :]

    # create KDTree over voxel physical coordinates
    physical_space_tree = sp.KDTree(
        label_img_long_nobackground[:, 0:3], leafsize=15
    )

    # nearest labeled voxel for each element centroid
    # (renamed from `idx` so the loop below cannot shadow it)
    d, nearest_idx = physical_space_tree.query(elcentroids_ROI, k=1)

    # calculate correct PID offset: reuse the target PID slot when it is
    # already the highest ID in the model
    if max_id == target_region_id:
        offset = max_id - 1
    else:
        offset = max_id

    new_pids = label_img_long_nobackground[nearest_idx, 3] + offset

    mdl.element_table[ect_region_mask, 1] = new_pids

    # map part IDs and material ids for every non-background region
    for label_id, mat in enumerate(region_properties):
        # '!=' instead of 'is not': identity comparison fails for ints
        # outside CPython's small-integer cache
        if label_id != label_background_id:
            mat_id = label_id + offset

            # part/material share the same display name; prefer the
            # caller-supplied prefix when given (the original used
            # target_region_id for the material even when a prefix was set)
            if region_prefix is not None:
                region_name = f"{region_prefix}_{label_id}"
            else:
                region_name = f"{target_region_id}_{label_id}"

            # create part
            mdl.part_info[str(mat_id)] = {
                "name": region_name,
                "constants": [mat_id],
            }

            # create material; only named when a prefix was requested,
            # matching the original branch structure
            if region_prefix is not None:
                mdl.material_info.append(
                    {
                        "type": "KELVIN-MAXWELL_VISCOELASTIC",
                        "name": region_name,
                        "ID": mat_id,
                        "constants": [0, 0] + list(mat),
                    }
                )
            else:
                mdl.material_info.append(
                    {
                        "type": "KELVIN-MAXWELL_VISCOELASTIC",
                        "ID": mat_id,
                        "constants": [0, 0] + list(mat),
                    }
                )

    return mdl

Model class and I/O

MRI2FE.FEModel

Model object storing the generated FE model.

Source code in src/MRI2FE/models/femodel.py
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
class FEModel:
    """Model object storing the generated FE model."""

    def __init__(
        self,
        title: str = "",
        source: str = "",
        imgout: Optional[str] = None,
        nodes: Optional[ArrayLike] = None,
        elements: Optional[ArrayLike] = None,
        parts: Optional[dict] = None,
        materials: Optional[List[dict]] = None,
        sections: Optional[List[dict]] = None,
    ):
        """Initialize the FE model

        Args:
            title (str, optional): Name for model, will be inserted as solver deck title on output. Defaults to "".
            source (str, optional): Source file for model, optional. Defaults to "".
            imgout (str, optional): Directory to save validation images to.  Defaults to None.
            nodes (Union[list, np.ndarray], optional): Array with shape (n,4) containing node ID, x, y, z coordinates. Defaults to None.
            elements (Union[list, np.ndarray], optional): Array with shape (n,m+2) containing element ID, part/group ID, and m connected nodes. Defaults to None.
            parts (dict, optional): Part definitions.  Dictionary keys are the part ID.  Each key is linked to a dictionary with entries "name" and "constants". Defaults to None.
            materials (List[dict], optional): Material definitions.  List of dictionaries.  Each dictionary must contain the keys "name", "ID", and "constants". Defaults to None.
            sections (List[dict], optional): Material section definitions.  List of dictionaries.  Each dictionary must contain the keys "ID" and "constants". Defaults to None.

        Raises:
            ValueError: Wrong input type.
        """
        self.metadata = {
            "title": title,
            "source": source,
            "num_nodes": 0,
            "num_elements": 0,
            "imgout": imgout,
        }

        self.centroid_table = None

        # create node table - array of shape (n,4) where each column has the format: [node_id, x, y, z]
        if nodes is not None:
            if isinstance(nodes, list):
                self.node_table: np.ndarray = np.array(nodes)
            elif isinstance(nodes, np.ndarray):
                self.node_table = nodes
            else:
                raise ValueError("Nodes must be a list or numpy array")

            self.metadata["num_nodes"] = self.node_table.shape[0]
        else:
            self.node_table = np.array([])
            self.metadata["num_nodes"] = 0

        # create element table - array of shape (n,m+2) where m is the number of nodes in the element type: [element_id, part_id, node1, node2, node3, node4...]
        if elements is not None:
            if isinstance(elements, np.ndarray):
                self.element_table = elements
            elif isinstance(elements, list):
                self.element_table = np.array(elements)
            else:
                raise ValueError("Elements must be a list or numpy array")
            self.metadata["num_elements"] = self.element_table.shape[0]
        else:
            self.element_table = np.array([])
            self.metadata["num_elements"] = 0

        # create centroid table - List of centroids: [x,y,z]
        self.centroid_table = None

        # create part info - Dictionary with keys "part id" and dictionary of "name":str and "constants":list
        if parts is not None:
            if isinstance(parts, dict):
                self.part_info = parts
            else:
                raise ValueError("Parts must be a dictionary")
        else:
            self.part_info = {}

        # create material info - List of Dictionaries with three entries: "type":str, "ID":int, and "constants":list[int,float]
        if materials is not None:
            if isinstance(materials, list):
                self.material_info = materials
            else:
                raise ValueError("Materials must be a list of dictionaries")
        else:
            self.material_info = []

        # create section info - List of Dictionaries with two entries: "ID": str and "constants":list[int, float]
        if sections is not None:
            if isinstance(sections, list):
                self.section_info = sections
            else:
                raise ValueError("Sections must be a list of dictionaries")
        else:
            self.section_info = []

    def add_nodes(
        self,
        node_id: Optional[int] = None,
        x: Optional[float] = None,
        y: Optional[float] = None,
        z: Optional[float] = None,
        node_array: Optional[ArrayLike] = None,
        force_insert: bool = False,
    ):
        """Add a node to the node table

        Args:
            node_id (int, optional): ID of the new node to add, must provide x-, y- and z- coordinates if specified. Defaults to None.
            x (float, optional): x-coordinate of the node, must also provide y- and z-coordinates. Defaults to None.
            y (float, optional): y-coordinate of the node, must also provide x- and z-coordinates. Defaults to None.
            z (float, optional): z-coordinate of the node, must also provide x- and y-coordinates. Defaults to None.
            node_array (np.ndarray, optional): (4,) array containing node_id,x,y,z coordinates. Defaults to None.
            force_insert (bool, optional): Whether to overwrite existing node with the same ID. Defaults to False.

        Raises:
            ValueError: Wrong input type.
            ValueError: Wrong array size.
            ValueError: Node ID already exists and force_insert false.
        """
        # check which input type is provided
        if all(var is not None for var in [node_id, x, y, z]):
            indiv_input = True
            node_id_list = np.array([node_id])

        elif node_array is not None:
            indiv_input = False
            node_array = np.atleast_2d(node_array)
            node_id_list = node_array[:, 0]

        else:
            raise ValueError(
                "Must provide either (node_id,x,y,z) or node_array"
            )

        node_array = np.asarray(node_array)
        # check array dimensions match
        if (
            not indiv_input
            and node_array is not None
            and node_array.shape[1] != self.node_table.shape[1]
        ):
            raise ValueError(
                "Node array dimensions do not match node table dimensions"
            )

        if not force_insert and self.node_table.size > 0:
            # check if node already in the table
            node_table = np.atleast_2d(self.node_table)
            node_table = node_table[:, 0]
            matches = np.intersect1d(node_id_list, node_table)

            if len(matches) > 0:
                raise ValueError(
                    f"The following inserted nodes have duplicate IDs: {matches}"
                )

        # if node array is none, inserted nodes becomes array
        if self.node_table.size == 0 and indiv_input:
            self.node_table = np.array([node_id, x, y, z])
            self.metadata["num_nodes"] = 1

        elif self.node_table.size == 0:
            self.node_table = node_array
            self.metadata["num_nodes"] = node_array.shape[0]

        # add nodes to node table
        elif indiv_input:
            self.node_table = np.row_stack(
                (self.node_table, np.array([node_id, x, y, z]))
            )

            current_count = cast(int, self.metadata.get("num_nodes", 0))
            self.metadata["num_nodes"] = current_count + 1
        else:
            assert node_array is not None
            self.node_table = np.row_stack((self.node_table, node_array))

            current_count = cast(int, self.metadata.get("num_nodes", 0))
            self.metadata["num_nodes"] = current_count + node_array.shape[0]

    def add_elements(
        self,
        element_id: Optional[int] = None,
        part_id: Optional[int] = None,
        nodes: Optional[list] = None,
        element_array: Optional[np.ndarray] = None,
        force_insert: bool = False,
    ):
        """Add an element to the element table.

        Args:
            element_id (int, optional): Element ID, must also provide part_id and nodes if not None. Defaults to None.
            part_id (int, optional): Connected part ID, must also provide element_id and nodes if not None. Defaults to None.
            nodes (list, optional): Connected node IDs, must also provide element_id and part_id. Defaults to None.
            element_array (np.ndarray, optional): Array of shape (n+2,) containing element_id, part_id, n connected node ids. Defaults to None.
            force_insert (bool, optional): Whether to overwrite if element already exists with the same ID. Defaults to False.

        Raises:
            ValueError: Wrong input type.
            ValueError: Input shape mismatch with element table.
            ValueError: Element ID already exists and force_insert is False.
        """

        if all(var is not None for var in [element_id, part_id, nodes]):
            indiv_input = True
            element_id_list = [element_id]

        elif element_array is not None:
            indiv_input = False

            element_array = np.atleast_2d(element_array)
            element_id_list = element_array[:, 0].tolist()

        else:
            raise ValueError(
                "Must provide either (element_id, part_id, nodes) or element_array"
            )

        # check if array dimensions match
        if (
            not indiv_input
            and element_array is not None
            and not element_array.shape[1] == self.element_table.shape[1]
        ):
            raise ValueError(
                "Element array dimensions do not match self.element_table dimensions"
            )

        # check if element already exists
        if not force_insert and self.element_table.size > 0:
            # check if node already in the table
            element_table = np.atleast_2d(self.element_table)
            element_table = element_table[:, 0]
            matches = np.intersect1d(np.array(element_id_list), element_table)

            if len(matches) > 0:
                raise ValueError(
                    f"The following inserted nodes have duplicate IDs: {matches}"
                )

        # if element array is none, inserted element becomes array
        if self.element_table.size == 0 and indiv_input:
            assert nodes is not None
            self.element_table = np.array([element_id, part_id] + nodes)
            self.metadata["num_elements"] = 1

        elif self.element_table.size == 0:
            assert element_array is not None

            self.element_table = element_array
            self.metadata["num_elements"] = element_array.shape[0]

        elif indiv_input and nodes is not None:
            row_insert = np.array([element_id, part_id] + nodes)
            self.element_table = np.row_stack((self.element_table, row_insert))

            current_count = cast(int, self.metadata.get("num_elements", 0))
            self.metadata["num_elements"] = current_count + 1
        elif element_array is not None:
            self.element_table = np.row_stack(
                (self.element_table, element_array)
            )

            current_count = cast(int, self.metadata.get("num_elements", 0))
            self.metadata["num_elements"] = (
                current_count + element_array.shape[0]
            )

    def update_centroids(self):
        """Update the centroid table with all elements in the element table."""
        if self.element_table.size > 0:
            node_dict = {
                int(self.node_table[i, 0]): i
                for i in range(len(self.node_table))
            }
            n_elements = len(self.element_table)
            centroids = np.zeros((n_elements, 3))

            for i, element in enumerate(self.element_table):
                node_ids = np.unique(element[2:])
                node_indices = [
                    node_dict[int(node_id)] for node_id in node_ids
                ]
                centroids[i] = np.mean(
                    self.node_table[node_indices, 1:], axis=0
                )
            self.centroid_table = centroids

    def add_part(self, part_id: int, name: str, material_constants: list):
        """Add part information (e.g., material constants)."""
        self.part_info[part_id] = {}
        self.part_info[part_id]["name"] = name
        self.part_info[part_id]["constants"] = material_constants

    def get_part_info(self):
        """Return the part information."""
        return self.part_info

    def __repr__(self):
        """String representation of the FEModel."""
        return (
            f"FEModel(title={self.metadata['title']}, "
            f"source={self.metadata['source']}, "
            f"num_nodes={self.metadata['num_nodes']}, "
            f"num_elements={self.metadata['num_elements']})"
        )

    def write_lsdyna(self, filename: str):
        """
        Write FE model data to an LS-DYNA .k file.

        Emits, in order: *KEYWORD, *TITLE, *NODE, *ELEMENT_SOLID, one *PART
        per entry in part_info, *SECTION_SOLID cards from section_info,
        *MAT_* cards from material_info, and a closing *END.  All cards use
        fixed-width (8- or 10-character) fields.

        Args:
            filename (str): Output file path for the .k file.

        Raises:
            ValueError: If a material has more than 7 constants (multi-line
                material cards are not supported) or a material constant has
                a type other than int/float.
        """
        with open(filename, "w") as f:
            f.write("*KEYWORD\n")
            f.write(f"*TITLE\n{self.metadata['title']}\n")

            # Write nodes: 8-char node ID followed by three 16.6f coordinates
            f.write("*NODE\n")
            for row in self.node_table:
                f.write(
                    f"{int(row[0]):8d}{row[1]:16.6f}{row[2]:16.6f}{row[3]:16.6f}\n"
                )

            # Write elements: first line is eid + pid, second line is the
            # connectivity padded out to a 10-node card
            f.write("*ELEMENT_SOLID\n")
            for row in self.element_table:
                f.write(
                    f"{int(row[0]):>8d}{int(row[1]):>8d}\n"
                )  # eid and part id

                # write element connectivity, padding to 10-node format
                for i in range(2, len(row)):
                    f.write(f"{row[i]:>8d}")

                # Pad with last valid node or a dummy valid node ID (ex. repeat last node)
                # NOTE(review): for a 4-node tetra this emits 8 node fields
                # plus the two zero fields below (10 total) -- confirm this
                # matches the solver's expected ELEMENT_SOLID layout.
                last_node = row[-1]
                for i in range(len(row), 10):
                    f.write(f"{last_node:>8d}")

                # zero padding for n9 and n10
                f.write(f"{0:>8d}{0:>8d}")

                f.write("\n")

            # Writing parts: heading line with the part name, then an
            # 8-field card (pid followed by up to 7 constants, zero-filled)
            for id, part in self.part_info.items():
                f.write("*PART\n")
                f.write(part["name"] + "\n")
                part_insert = [0, 0, 0, 0, 0, 0, 0, 0]
                part_insert[0] = int(id)
                # update default part with all available information
                for idx, item in enumerate(part["constants"]):
                    part_insert[idx + 1] = item

                for item in part_insert:
                    f.write(f"{item:>10d}")
                f.write("\n")

            # Write solid sections: secid, element formulation, and optional
            # ambient element type (aet); trailing floats are placeholders
            for sec in self.section_info:
                f.write("*SECTION_SOLID\n")
                secid = sec["ID"]
                elform = sec["constants"][0]
                aet = 0
                if len(sec["constants"]) > 1:
                    aet = sec["constants"][1]
                f.write(
                    f"{secid:>10d}{elform:>10d}{aet:10d}{0.0:>40.1f}{0.0:>10.1f}\n"
                )

            # Write materials: one single-line 8-field card per material,
            # optionally preceded by a _TITLE heading when a name is present
            for mat in self.material_info:
                mat_type = mat["type"]
                mat_id = mat["ID"]
                props = mat["constants"]

                # check if multi-line input card, raise error if yes
                if len(props) > 7:
                    raise ValueError(
                        f"Error in material id {mat_id}: multi-line input cards not supported"
                    )

                mat_insert = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                mat_insert[0] = mat_id
                for idx, item in enumerate(props):
                    mat_insert[idx + 1] = item

                if "name" in mat:
                    f.write(f"*MAT_{mat_type}_TITLE\n")
                    f.write(f"{mat['name']}\n")
                else:
                    f.write(f"*MAT_{mat_type}\n")
                for item in mat_insert:
                    # convert from numpy types
                    if hasattr(
                        item, "item"
                    ):  # numpy scalars have .item() method
                        item = item.item()

                    # check and write type: ints as %10d, exact zeros as
                    # "0.0", other floats in scientific notation
                    if isinstance(item, int):
                        f.write(f"{item:>10d}")
                    elif isinstance(item, float) and item == 0.0:
                        f.write(f"{0.0:>10.1f}")
                    elif isinstance(item, float):
                        f.write(f"{item:>10.2E}")
                    else:
                        raise ValueError(
                            f"Unsupported type in material constants: {type(item)}"
                        )

                f.write("\n")

            # End of file
            f.write("*END\n")

    def from_meshio(
        self,
        mesh: Union[meshio.Mesh, str],
        element_type: Literal["tetra"] = "tetra",
        region_names: Optional[List[str]] = None,
    ):
        """Append mesh data from a meshio mesh object or file to the FEModel,
        offsetting node/element/part IDs past existing entries and updating
        metadata.

        Args:
            mesh (meshio.Mesh, str): Mesh object or filepath to mesh file.
            element_type ("tetra", optional): Element type to import. Only "tetra" is supported.
            region_names (List[str], optional): Optional part names for each region. Defaults to None.

        Raises:
            ValueError: If the element type of the new mesh does not match the existing FEModel.
            ValueError: If no elements of the specified type are found in the mesh.
            ValueError: If the mesh carries no 'medit:ref' cell data (needed for part IDs).
        """
        if isinstance(mesh, str):
            mesh = meshio.read(mesh)

        # Check element type compatibility with any elements already stored.
        if self.element_table is not None and self.element_table.size > 0:
            # Assume all elements in self.element_table are of the same type;
            # columns are [eid, pid, n1..nm], so m = ncols - 2.
            num_nodes_per_elem = self.element_table.shape[1] - 2
            if element_type == "tetra" and num_nodes_per_elem != 4:
                raise ValueError(
                    "Element type mismatch: existing elements are not tetrahedrons."
                )

        mesh_nodes = mesh.points
        n_points = mesh_nodes.shape[0]
        # Offset node IDs so the new nodes follow the current maximum ID.
        max_node_id = (
            int(np.max(self.node_table[:, 0]))
            if self.node_table is not None and self.node_table.size > 0
            else 0
        )
        nids = np.arange(1, n_points + 1) + max_node_id
        new_nodes = np.column_stack((nids, mesh_nodes))

        # Locate the first cell block of the requested element type.
        connectivity_index = None
        node_connectivity = None
        for idx, cell_block in enumerate(mesh.cells):
            if cell_block.type == element_type:
                # Map 0-based mesh indices to our new node IDs
                node_connectivity = nids[cell_block.data]
                connectivity_index = idx
                break
        if node_connectivity is None:
            raise ValueError(
                f"Element type {element_type} not found in mesh cells"
            )

        n_elements = node_connectivity.shape[0]
        max_elem_id = (
            int(np.max(self.element_table[:, 0]))
            if self.element_table is not None and self.element_table.size > 0
            else 0
        )
        eids = np.arange(1, n_elements + 1) + max_elem_id

        # Offset part IDs past the largest existing part ID.
        if "medit:ref" not in mesh.cell_data:
            raise ValueError(
                "Mesh must contain 'medit:ref' cell data for part IDs"
            )

        pid = mesh.cell_data["medit:ref"][connectivity_index]
        max_part_id = (
            max(int(k) for k in self.part_info.keys())
            if self.part_info
            else 0
        )

        new_pid = pid + max_part_id
        new_elements = np.column_stack((eids, new_pid, node_connectivity))

        # Drop elements whose (offset) part ID is not positive; a zero
        # 'medit:ref' marks unlabeled cells. All nodes are kept regardless.
        mask = new_pid > 0
        new_elements = new_elements[mask, :]
        new_pid_filtered = new_pid[mask]

        # Update part_info - only for parts that have elements (after filtering)
        for idx, part_id in enumerate(np.unique(new_pid_filtered)):
            if region_names is not None and idx < len(region_names):
                self.part_info[str(part_id)] = {
                    "name": region_names[idx],
                    "constants": [],
                }
            else:
                self.part_info[str(part_id)] = {
                    "name": str(part_id),
                    "constants": [],
                }

        # Append to (or initialize) the node and element tables.
        # np.vstack replaces the deprecated np.row_stack alias.
        if self.node_table is not None and self.node_table.size > 0:
            self.node_table = np.vstack((self.node_table, new_nodes))
        else:
            self.node_table = new_nodes

        if self.element_table is not None and self.element_table.size > 0:
            self.element_table = np.vstack(
                (self.element_table, new_elements)
            )
        else:
            self.element_table = new_elements

        # Update metadata counts and record provenance of the imported mesh.
        self.metadata["num_nodes"] = self.node_table.shape[0]
        self.metadata["num_elements"] = self.element_table.shape[0]
        mesh_src = f"meshio:{getattr(mesh, 'filename', 'unknown')}"
        if "source" in self.metadata and self.metadata["source"]:
            self.metadata["source"] = (
                str(self.metadata["source"]) + ", " + mesh_src
            )
        else:
            self.metadata["source"] = mesh_src

__init__(title='', source='', imgout=None, nodes=None, elements=None, parts=None, materials=None, sections=None)

Initialize the FE model

Parameters:

Name Type Description Default
title str

Name for model, will be inserted as solver deck title on output. Defaults to "".

''
source str

Source file for model, optional. Defaults to "".

''
imgout str

Directory to save validation images to. Defaults to None.

None
nodes Union[list, ndarray]

Array with shape (n,4) containing node ID, x, y, z coordinates. Defaults to None.

None
elements Union[list, ndarray]

Array with shape (n,m+2) containing element ID, part/group ID, and m connected nodes. Defaults to None.

None
parts dict

Part definitions. Dictionary keys are the part ID. Each key is linked to a dictionary with entries "name" and "constants". Defaults to None.

None
materials List[dict]

Material definitions. List of dictionaries. Each dictionary must contain the keys "name", "ID", and "constants". Defaults to None.

None
sections List[dict]

Material section definitions. List of dictionaries. Each dictionary must contain the keys "ID" and "constants". Defaults to None.

None

Raises:

Type Description
ValueError

Wrong input type.

Source code in src/MRI2FE/models/femodel.py
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
def __init__(
    self,
    title: str = "",
    source: str = "",
    imgout: Optional[str] = None,
    nodes: Optional[ArrayLike] = None,
    elements: Optional[ArrayLike] = None,
    parts: Optional[dict] = None,
    materials: Optional[List[dict]] = None,
    sections: Optional[List[dict]] = None,
):
    """Initialize the FE model

    Args:
        title (str, optional): Name for model, will be inserted as solver deck title on output. Defaults to "".
        source (str, optional): Source file for model, optional. Defaults to "".
        imgout (str, optional): Directory to save validation images to.  Defaults to None.
        nodes (Union[list, np.ndarray], optional): Array with shape (n,4) containing node ID, x, y, z coordinates. Defaults to None.
        elements (Union[list, np.ndarray], optional): Array with shape (n,m+2) containing element ID, part/group ID, and m connected nodes. Defaults to None.
        parts (dict, optional): Part definitions.  Dictionary keys are the part ID.  Each key is linked to a dictionary with entries "name" and "constants". Defaults to None.
        materials (List[dict], optional): Material definitions.  List of dictionaries.  Each dictionary must contain the keys "name", "ID", and "constants". Defaults to None.
        sections (List[dict], optional): Material section definitions.  List of dictionaries.  Each dictionary must contain the keys "ID" and "constants". Defaults to None.

    Raises:
        ValueError: Wrong input type.
    """
    self.metadata = {
        "title": title,
        "source": source,
        "num_nodes": 0,
        "num_elements": 0,
        "imgout": imgout,
    }

    self.centroid_table = None

    # create node table - array of shape (n,4) where each column has the format: [node_id, x, y, z]
    if nodes is not None:
        if isinstance(nodes, list):
            self.node_table: np.ndarray = np.array(nodes)
        elif isinstance(nodes, np.ndarray):
            self.node_table = nodes
        else:
            raise ValueError("Nodes must be a list or numpy array")

        self.metadata["num_nodes"] = self.node_table.shape[0]
    else:
        self.node_table = np.array([])
        self.metadata["num_nodes"] = 0

    # create element table - array of shape (n,m+2) where m is the number of nodes in the element type: [element_id, part_id, node1, node2, node3, node4...]
    if elements is not None:
        if isinstance(elements, np.ndarray):
            self.element_table = elements
        elif isinstance(elements, list):
            self.element_table = np.array(elements)
        else:
            raise ValueError("Elements must be a list or numpy array")
        self.metadata["num_elements"] = self.element_table.shape[0]
    else:
        self.element_table = np.array([])
        self.metadata["num_elements"] = 0

    # create centroid table - List of centroids: [x,y,z]
    self.centroid_table = None

    # create part info - Dictionary with keys "part id" and dictionary of "name":str and "constants":list
    if parts is not None:
        if isinstance(parts, dict):
            self.part_info = parts
        else:
            raise ValueError("Parts must be a dictionary")
    else:
        self.part_info = {}

    # create material info - List of Dictionaries with three entries: "type":str, "ID":int, and "constants":list[int,float]
    if materials is not None:
        if isinstance(materials, list):
            self.material_info = materials
        else:
            raise ValueError("Materials must be a list of dictionaries")
    else:
        self.material_info = []

    # create section info - List of Dictionaries with two entries: "ID": str and "constants":list[int, float]
    if sections is not None:
        if isinstance(sections, list):
            self.section_info = sections
        else:
            raise ValueError("Sections must be a list of dictionaries")
    else:
        self.section_info = []

__repr__()

String representation of the FEModel.

Source code in src/MRI2FE/models/femodel.py
307
308
309
310
311
312
313
314
def __repr__(self):
    """String representation of the FEModel."""
    return (
        f"FEModel(title={self.metadata['title']}, "
        f"source={self.metadata['source']}, "
        f"num_nodes={self.metadata['num_nodes']}, "
        f"num_elements={self.metadata['num_elements']})"
    )

add_elements(element_id=None, part_id=None, nodes=None, element_array=None, force_insert=False)

Add an element to the element table.

Parameters:

Name Type Description Default
element_id int

Element ID, must also provide part_id and nodes if not None. Defaults to None.

None
part_id int

Connected part ID, must also provide element_id and nodes if not None. Defaults to None.

None
nodes list

Connected node IDs, must also provide element_id and part_id. Defaults to None.

None
element_array ndarray

Array of shape (n+2,) containing element_id, part_id, n connected node ids. Defaults to None.

None
force_insert bool

Whether to overwrite if element already exists with the same ID. Defaults to False.

False

Raises:

Type Description
ValueError

Wrong input type.

ValueError

Input shape mismatch with element table.

ValueError

Element ID already exists and force_insert is False.

Source code in src/MRI2FE/models/femodel.py
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
def add_elements(
    self,
    element_id: Optional[int] = None,
    part_id: Optional[int] = None,
    nodes: Optional[list] = None,
    element_array: Optional[np.ndarray] = None,
    force_insert: bool = False,
):
    """Add an element to the element table.

    Args:
        element_id (int, optional): Element ID, must also provide part_id and nodes if not None. Defaults to None.
        part_id (int, optional): Connected part ID, must also provide element_id and nodes if not None. Defaults to None.
        nodes (list, optional): Connected node IDs, must also provide element_id and part_id. Defaults to None.
        element_array (np.ndarray, optional): Array of shape (n+2,) containing element_id, part_id, n connected node ids. Defaults to None.
        force_insert (bool, optional): Whether to overwrite if element already exists with the same ID. Defaults to False.

    Raises:
        ValueError: Wrong input type.
        ValueError: Input shape mismatch with element table.
        ValueError: Element ID already exists and force_insert is False.
    """

    if all(var is not None for var in [element_id, part_id, nodes]):
        indiv_input = True
        element_id_list = [element_id]

    elif element_array is not None:
        indiv_input = False

        # atleast_2d lets a single (n+2,) row share the (k, n+2) code path
        element_array = np.atleast_2d(element_array)
        element_id_list = element_array[:, 0].tolist()

    else:
        raise ValueError(
            "Must provide either (element_id, part_id, nodes) or element_array"
        )

    # check if array dimensions match; only meaningful when a table already
    # exists, and atleast_2d guards against a single-row (1-D) table, which
    # previously raised IndexError instead of ValueError
    if (
        not indiv_input
        and element_array is not None
        and self.element_table.size > 0
        and element_array.shape[1]
        != np.atleast_2d(self.element_table).shape[1]
    ):
        raise ValueError(
            "Element array dimensions do not match self.element_table dimensions"
        )

    # check if element already exists
    if not force_insert and self.element_table.size > 0:
        existing_ids = np.atleast_2d(self.element_table)[:, 0]
        matches = np.intersect1d(np.array(element_id_list), existing_ids)

        if len(matches) > 0:
            # message previously said "nodes"; these are element IDs
            raise ValueError(
                f"The following inserted elements have duplicate IDs: {matches}"
            )

    # empty table: the inserted element(s) become the table
    if self.element_table.size == 0 and indiv_input:
        assert nodes is not None
        self.element_table = np.array([element_id, part_id] + nodes)
        self.metadata["num_elements"] = 1

    elif self.element_table.size == 0:
        assert element_array is not None

        self.element_table = element_array
        self.metadata["num_elements"] = element_array.shape[0]

    elif indiv_input and nodes is not None:
        row_insert = np.array([element_id, part_id] + nodes)
        # np.vstack replaces the deprecated np.row_stack alias
        self.element_table = np.vstack((self.element_table, row_insert))

        current_count = cast(int, self.metadata.get("num_elements", 0))
        self.metadata["num_elements"] = current_count + 1
    elif element_array is not None:
        self.element_table = np.vstack(
            (self.element_table, element_array)
        )

        current_count = cast(int, self.metadata.get("num_elements", 0))
        self.metadata["num_elements"] = (
            current_count + element_array.shape[0]
        )

add_nodes(node_id=None, x=None, y=None, z=None, node_array=None, force_insert=False)

Add a node to the node table

Parameters:

Name Type Description Default
node_id int

ID of the new node to add, must provide x-, y- and z- coordinates if specified. Defaults to None.

None
x float

x-coordinate of the node, must also provide y- and z-coordinates. Defaults to None.

None
y float

y-coordinate of the node, must also provide x- and z-coordinates. Defaults to None.

None
z float

z-coordinate of the node, must also provide x- and y-coordinates. Defaults to None.

None
node_array ndarray

(4,) array containing node_id,x,y,z coordinates. Defaults to None.

None
force_insert bool

Whether to overwrite existing node with the same ID. Defaults to False.

False

Raises:

Type Description
ValueError

Wrong input type.

ValueError

Wrong array size.

ValueError

Node ID already exists and force_insert is False.

Source code in src/MRI2FE/models/femodel.py
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
def add_nodes(
    self,
    node_id: Optional[int] = None,
    x: Optional[float] = None,
    y: Optional[float] = None,
    z: Optional[float] = None,
    node_array: Optional[ArrayLike] = None,
    force_insert: bool = False,
):
    """Add a node to the node table

    Args:
        node_id (int, optional): ID of the new node to add, must provide x-, y- and z- coordinates if specified. Defaults to None.
        x (float, optional): x-coordinate of the node, must also provide y- and z-coordinates. Defaults to None.
        y (float, optional): y-coordinate of the node, must also provide x- and z-coordinates. Defaults to None.
        z (float, optional): z-coordinate of the node, must also provide x- and y-coordinates. Defaults to None.
        node_array (np.ndarray, optional): (4,) array containing node_id,x,y,z coordinates; an (n,4) array of rows is also accepted. Defaults to None.
        force_insert (bool, optional): Whether to overwrite existing node with the same ID. Defaults to False.

    Raises:
        ValueError: Wrong input type.
        ValueError: Wrong array size.
        ValueError: Node ID already exists and force_insert is False.
    """
    # check which input type is provided
    if all(var is not None for var in [node_id, x, y, z]):
        indiv_input = True
        node_id_list = np.array([node_id])

    elif node_array is not None:
        indiv_input = False
        # atleast_2d lets a single (4,) row share the (n,4) code path
        node_array = np.atleast_2d(node_array)
        node_id_list = node_array[:, 0]

    else:
        raise ValueError(
            "Must provide either (node_id,x,y,z) or node_array"
        )

    # NOTE: the unconditional np.asarray(node_array) that used to sit here
    # converted None into a 0-d object array on the individual-input path;
    # atleast_2d above already yields an ndarray when node_array is given.

    # check array dimensions match; only meaningful when a table already
    # exists, and atleast_2d guards against a single-row (1-D) table, which
    # previously raised IndexError instead of ValueError
    if (
        not indiv_input
        and node_array is not None
        and self.node_table.size > 0
        and node_array.shape[1] != np.atleast_2d(self.node_table).shape[1]
    ):
        raise ValueError(
            "Node array dimensions do not match node table dimensions"
        )

    if not force_insert and self.node_table.size > 0:
        # check if node already in the table
        existing_ids = np.atleast_2d(self.node_table)[:, 0]
        matches = np.intersect1d(node_id_list, existing_ids)

        if len(matches) > 0:
            raise ValueError(
                f"The following inserted nodes have duplicate IDs: {matches}"
            )

    # empty table: the inserted node(s) become the table
    if self.node_table.size == 0 and indiv_input:
        self.node_table = np.array([node_id, x, y, z])
        self.metadata["num_nodes"] = 1

    elif self.node_table.size == 0:
        assert node_array is not None
        self.node_table = node_array
        self.metadata["num_nodes"] = node_array.shape[0]

    # add nodes to existing node table
    elif indiv_input:
        # np.vstack replaces the deprecated np.row_stack alias
        self.node_table = np.vstack(
            (self.node_table, np.array([node_id, x, y, z]))
        )

        current_count = cast(int, self.metadata.get("num_nodes", 0))
        self.metadata["num_nodes"] = current_count + 1
    else:
        assert node_array is not None
        self.node_table = np.vstack((self.node_table, node_array))

        current_count = cast(int, self.metadata.get("num_nodes", 0))
        self.metadata["num_nodes"] = current_count + node_array.shape[0]

add_part(part_id, name, material_constants)

Add part information (e.g., material constants).

Source code in src/MRI2FE/models/femodel.py
297
298
299
300
301
def add_part(self, part_id: int, name: str, material_constants: list):
    """Register a part under its ID with a display name and material constants."""
    self.part_info[part_id] = {
        "name": name,
        "constants": material_constants,
    }

from_meshio(mesh, element_type='tetra', region_names=None)

Append mesh data from a meshio mesh object or file to the FEModel, offsetting IDs and updating metadata.

Parameters:

Name Type Description Default
mesh (Mesh, str)

Mesh object or filepath to mesh file.

required
element_type tetra

Element type to import. Only "tetra" is supported.

'tetra'
region_names List[str]

Optional part names for each region. Defaults to None.

None

Raises:

Type Description
ValueError

If the element type of the new mesh does not match the existing FEModel.

ValueError

If no elements of the specified type are found in the mesh.

Source code in src/MRI2FE/models/femodel.py
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
def from_meshio(
    self,
    mesh: Union[meshio.Mesh, str],
    element_type: Literal["tetra"] = "tetra",
    region_names: Optional[List[str]] = None,
):
    """Append mesh data from a meshio mesh object or file to the FEModel, offsetting IDs and
    updating metadata.

    Args:
        mesh (meshio.Mesh, str): Mesh object or filepath to mesh file.
        element_type ("tetra", optional): Element type to import. Only "tetra" is supported.
        region_names (List[str], optional): Optional part names for each region. Defaults to None.

    Raises:
        ValueError: If the element type of the new mesh does not match the existing FEModel.
        ValueError: If no elements of the specified type are found in the mesh.
        ValueError: If the mesh carries no 'medit:ref' cell data (needed for part IDs).
    """
    if isinstance(mesh, str):
        mesh = meshio.read(mesh)

    # Check element type compatibility
    if self.element_table is not None and self.element_table.size > 0:
        # Assume all elements in self.element_table are of the same type
        # (columns are [eid, pid, n1..nm], so m = ncols - 2)
        num_nodes_per_elem = self.element_table.shape[1] - 2
        if element_type == "tetra" and num_nodes_per_elem != 4:
            raise ValueError(
                "Element type mismatch: existing elements are not tetrahedrons."
            )

    mesh_nodes = mesh.points
    n_points = mesh_nodes.shape[0]
    # Offset node IDs so new nodes follow the current maximum node ID
    max_node_id = (
        int(np.max(self.node_table[:, 0]))
        if self.node_table is not None and self.node_table.size > 0
        else 0
    )
    nids = np.arange(1, n_points + 1) + max_node_id
    new_nodes = np.column_stack((nids, mesh_nodes))

    # Find an element type in cells (first matching cell block wins)
    connectivity_index = None
    node_connectivity = None
    for idx, item in enumerate(mesh.cells):
        if item.type == element_type:
            # Map 0-based mesh indices to our new node IDs
            node_connectivity = nids[item.data]
            connectivity_index = idx
            break
    if node_connectivity is None:
        raise ValueError(
            f"Element type {element_type} not found in mesh cells"
        )

    n_elements = node_connectivity.shape[0]
    max_elem_id = (
        int(np.max(self.element_table[:, 0]))
        if self.element_table is not None and self.element_table.size > 0
        else 0
    )
    eids = np.arange(1, n_elements + 1) + max_elem_id

    # Offset part IDs past the largest existing part ID
    if "medit:ref" not in mesh.cell_data:
        raise ValueError(
            "Mesh must contain 'medit:ref' cell data for part IDs"
        )

    pid = mesh.cell_data["medit:ref"][connectivity_index]
    max_part_id = (
        max([int(k) for k in self.part_info.keys()])
        if self.part_info
        else 0
    )
    pid_offset = max_part_id

    new_pid = pid + pid_offset
    new_elements = np.column_stack((eids, new_pid, node_connectivity))

    # Filter pid zero (elements whose offset part ID is not positive are dropped)
    mask = new_pid > 0
    new_elements = new_elements[mask, :]
    new_pid_filtered = new_pid[mask]

    # all nodes are used
    # NOTE(review): the assignment below is a no-op; nodes are kept even if
    # all of their elements were filtered out above
    new_nodes = new_nodes

    # Update part_info - only for parts that have elements (after filtering)
    for idx, id in enumerate(np.unique(new_pid_filtered)):
        if region_names is not None and idx < len(region_names):
            self.part_info[str(id)] = {
                "name": region_names[idx],
                "constants": [],
            }
        else:
            self.part_info[str(id)] = {"name": str(id), "constants": []}

    # Link node and element tables (append when data already exists)
    if self.node_table is not None and self.node_table.size > 0:
        self.node_table = np.row_stack((self.node_table, new_nodes))
    else:
        self.node_table = new_nodes

    if self.element_table is not None and self.element_table.size > 0:
        self.element_table = np.row_stack(
            (self.element_table, new_elements)
        )
    else:
        self.element_table = new_elements

    # Update metadata: refresh counts and append mesh provenance to "source"
    self.metadata["num_nodes"] = self.node_table.shape[0]
    self.metadata["num_elements"] = self.element_table.shape[0]
    if "source" in self.metadata and self.metadata["source"]:
        self.metadata["source"] = str(self.metadata["source"]) + (
            f", meshio:{getattr(mesh, 'filename', 'unknown')}"
        )
    else:
        self.metadata["source"] = (
            f"meshio:{getattr(mesh, 'filename', 'unknown')}"
        )

get_part_info()

Return the part information.

Source code in src/MRI2FE/models/femodel.py
303
304
305
def get_part_info(self):
    """Return the dictionary of part definitions keyed by part ID."""
    return self.part_info

update_centroids()

Update the centroid table with all elements in the element table.

Source code in src/MRI2FE/models/femodel.py
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
def update_centroids(self):
    """Recompute the centroid of every element from the current node table."""
    if self.element_table.size == 0:
        return

    # Map node IDs to their row index for O(1) coordinate lookup.
    id_to_row = {
        int(nid): row for row, nid in enumerate(self.node_table[:, 0])
    }

    centroids = np.zeros((len(self.element_table), 3))
    for row, elem in enumerate(self.element_table):
        # np.unique drops repeated node IDs (e.g. padded connectivity)
        # before averaging coordinates.
        rows = [id_to_row[int(nid)] for nid in np.unique(elem[2:])]
        centroids[row] = self.node_table[rows, 1:].mean(axis=0)

    self.centroid_table = centroids

write_lsdyna(filename)

Write FE model data to an LS-DYNA .k file.

Parameters:

Name Type Description Default
filename str

Output file path for the .k file.

required
Source code in src/MRI2FE/models/femodel.py
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
def write_lsdyna(self, filename: str):
    """
    Write FE model data to an LS-DYNA .k file.

    Emits, in order: *KEYWORD/*TITLE, *NODE, *ELEMENT_SOLID, one *PART card
    per entry in part_info, *SECTION_SOLID cards, *MAT_* cards, and *END.
    All cards use fixed-width fields, so the format strings below are
    byte-significant.

    Args:
        filename (str): Output file path for the .k file.

    Raises:
        ValueError: If a material has more than 7 constants (multi-line
            material cards are not supported) or a material constant has an
            unsupported type.
    """
    with open(filename, "w") as f:
        f.write("*KEYWORD\n")
        f.write(f"*TITLE\n{self.metadata['title']}\n")

        # Write nodes
        # 8-char node ID followed by three 16.6f coordinate fields.
        f.write("*NODE\n")
        for row in self.node_table:
            f.write(
                f"{int(row[0]):8d}{row[1]:16.6f}{row[2]:16.6f}{row[3]:16.6f}\n"
            )

        # Write elements
        f.write("*ELEMENT_SOLID\n")
        for row in self.element_table:
            f.write(
                f"{int(row[0]):>8d}{int(row[1]):>8d}\n"
            )  # eid and part id

            # write element connectivity, padding to 10-node format
            for i in range(2, len(row)):
                f.write(f"{row[i]:>8d}")

            # Pad with last valid node or a dummy valid node ID (ex. repeat last node)
            last_node = row[-1]
            for i in range(len(row), 10):
                f.write(f"{last_node:>8d}")

            # zero padding for n9 and n10
            # NOTE(review): for a 4-node tet (len(row) == 6) this emits
            # 4 real + 4 repeated + 2 zero node fields (10 total) — confirm
            # this matches the intended n1-n10 card layout.
            f.write(f"{0:>8d}{0:>8d}")

            f.write("\n")

        # Writing parts
        # Card layout: pid followed by up to 7 constants, 10-char int fields.
        for id, part in self.part_info.items():
            f.write("*PART\n")
            f.write(part["name"] + "\n")
            part_insert = [0, 0, 0, 0, 0, 0, 0, 0]
            part_insert[0] = int(id)
            # update default part with all available information
            for idx, item in enumerate(part["constants"]):
                part_insert[idx + 1] = item

            for item in part_insert:
                f.write(f"{item:>10d}")
            f.write("\n")

        # Write solid sections
        # constants[0] is the element formulation (elform); constants[1],
        # if present, is the ambient element type (aet).
        for sec in self.section_info:
            f.write("*SECTION_SOLID\n")
            secid = sec["ID"]
            elform = sec["constants"][0]
            aet = 0
            if len(sec["constants"]) > 1:
                aet = sec["constants"][1]
            f.write(
                f"{secid:>10d}{elform:>10d}{aet:10d}{0.0:>40.1f}{0.0:>10.1f}\n"
            )

        # Write materials
        # Each material is one card: mat ID plus up to 7 constants.
        for mat in self.material_info:
            mat_type = mat["type"]
            mat_id = mat["ID"]
            props = mat["constants"]

            # check if multi-line input card, raise error if yes
            if len(props) > 7:
                raise ValueError(
                    f"Error in material id {mat_id}: multi-line input cards not supported"
                )

            mat_insert = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            mat_insert[0] = mat_id
            for idx, item in enumerate(props):
                mat_insert[idx + 1] = item

            # the _TITLE keyword variant carries the material name on the
            # following line
            if "name" in mat:
                f.write(f"*MAT_{mat_type}_TITLE\n")
                f.write(f"{mat['name']}\n")
            else:
                f.write(f"*MAT_{mat_type}\n")
            for item in mat_insert:
                # convert from numpy types
                if hasattr(
                    item, "item"
                ):  # numpy scalars have .item() method
                    item = item.item()

                # check and write type
                # ints as 10-char decimal, exact zero as "0.0", other
                # floats in scientific notation
                if isinstance(item, int):
                    f.write(f"{item:>10d}")
                elif isinstance(item, float) and item == 0.0:
                    f.write(f"{0.0:>10.1f}")
                elif isinstance(item, float):
                    f.write(f"{item:>10.2E}")
                else:
                    raise ValueError(
                        f"Unsupported type in material constants: {type(item)}"
                    )

            f.write("\n")

        # End of file
        f.write("*END\n")

Utilities

MRI2FE.COM_align(fixed, moving, fixed_mask=None, moving_mask=None)

Align the centers of mass of two point clouds, operating either on the entire point cloud or on a masked region.

Parameters:

Name Type Description Default
fixed ndarray

Reference point cloud to be aligned to.

required
moving ndarray

Point cloud to align with fixed cloud.

required
fixed_mask ndarray

Fixed point cloud subset to define COM by. Defaults to None.

None
moving_mask ndarray

Moving point cloud subset to define COM by. Defaults to None.

None

Raises:

Type Description
TypeError

If inputs are not numpy arrays when provided

ValueError

If required inputs are None or have invalid dimensions

Returns:

Type Description
ndarray

np.ndarray: moving point cloud rigidly aligned to the fixed point cloud COM

Source code in src/MRI2FE/utilities.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
def COM_align(
    fixed: np.ndarray,
    moving: np.ndarray,
    fixed_mask: Optional[np.ndarray] = None,
    moving_mask: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Align the centers of mass of two point clouds, operating either on the entire point cloud or on a masked region.

    Works for point clouds of any dimensionality m (not only 3D): the
    translation is applied by broadcasting rather than a hard-coded 4x4
    homogeneous transform.

    Args:
        fixed (np.ndarray): Reference point cloud to be aligned to, shape (n, m).
        moving (np.ndarray): Point cloud to align with fixed cloud, shape (k, m).
        fixed_mask (np.ndarray, optional): Fixed point cloud subset to define COM by. Defaults to None.
        moving_mask (np.ndarray, optional): Moving point cloud subset to define COM by. Defaults to None.

    Raises:
        ValueError: If required inputs are None or have invalid dimensions.

    Returns:
        np.ndarray: moving point cloud rigidly aligned to the fixed point cloud COM
    """
    # Validate required inputs are not None
    if fixed is None:
        raise ValueError("fixed cannot be None")
    if moving is None:
        raise ValueError("moving cannot be None")

    # confirm numpy array input and validate dimensions
    fixed = np.asarray(fixed)
    if len(fixed.shape) != 2:
        raise ValueError("fixed must be a 2D array")

    moving = np.asarray(moving)
    if len(moving.shape) != 2:
        raise ValueError("moving must be a 2D array")

    # Validate matching dimensions
    if fixed.shape[1] != moving.shape[1]:
        raise ValueError(
            "fixed and moving must have the same number of dimensions"
        )

    # Validate masks if provided
    if fixed_mask is not None:
        fixed_mask = np.asarray(fixed_mask)
        if len(fixed_mask.shape) != 2:
            raise ValueError("fixed_mask must be a 2D array")
        if fixed_mask.shape[1] != fixed.shape[1]:
            raise ValueError(
                "fixed_mask must have same number of dimensions as fixed"
            )

    if moving_mask is not None:
        moving_mask = np.asarray(moving_mask)
        if len(moving_mask.shape) != 2:
            raise ValueError("moving_mask must be a 2D array")
        if moving_mask.shape[1] != moving.shape[1]:
            raise ValueError(
                "moving_mask must have same number of dimensions as moving"
            )

    # Centers of mass: masks (when provided) define the COM instead of the
    # full clouds
    COM_fixed = np.mean(fixed_mask if fixed_mask is not None else fixed, axis=0)
    COM_moving = np.mean(
        moving_mask if moving_mask is not None else moving, axis=0
    )

    # Rigid translation bringing the moving COM onto the fixed COM.
    # Broadcasting the (m,) offset over the (k, m) cloud replaces the
    # previous 3D-only homogeneous-matrix construction, which raised
    # IndexError for any m != 3.
    offset = COM_fixed - COM_moving

    return moving + offset

MRI2FE.point_cloud_spacing(dims, points=None, lims=None)

Return the voxel spacing necessary to cover a point cloud with given voxel dimensions. Can be called by either providing a point cloud or directly providing field limits.

Parameters:

Name Type Description Default
dims tuple

tuple of voxel count along each of m dimensions

required
points Union[list, tuple, ndarray]

locations of n points in m dimensions with shape (n,m)

None
lims ndarray

Min/max of point cloud space in m dimensions with shape (m,2)

None

Raises:

Type Description
TypeError

If inputs are not of correct type

ValueError

If dims is None or if neither points nor lims is provided

Returns:

Name Type Description
tuple tuple

spacing in each dimension

Source code in src/MRI2FE/utilities.py
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
def point_cloud_spacing(
    dims: ArrayLike,
    points: Optional[ArrayLike] = None,
    lims: Optional[ArrayLike] = None,
) -> tuple:
    """Return the voxel spacing necessary to cover a point cloud with given voxel dimensions.

    Can be called by either providing a point cloud or directly providing
    field limits.  When both are supplied, ``lims`` takes precedence.

    Args:
        dims (tuple): tuple of voxel count along each of m dimensions
        points (Union[list, tuple, np.ndarray], optional): locations of n points in m dimensions with shape (n,m)
        lims (np.ndarray, optional): Min/max of point cloud space in m dimensions with shape (m,2)

    Raises:
        ValueError: If dims is None, if neither points nor lims is provided,
            or if dimensions do not match.

    Returns:
        tuple: spacing in each dimension
    """
    # Validate dims is not None
    if dims is None:
        raise ValueError("dims cannot be None")

    # check that either points or lims is provided
    if points is None and lims is None:
        raise ValueError("Must provide either points or lims for calculation")

    # convert inputs to np.ndarray and check that dimensions match
    dims = np.asarray(dims)

    if points is not None:
        points = np.asarray(points)
        if dims.shape[0] != points.shape[-1]:
            raise ValueError(
                "dims must have the same dimension as the point cloud"
            )

    if lims is not None:
        lims = np.asarray(lims)
        if dims.shape[0] != lims.shape[0]:
            raise ValueError("dims must have the same dimension as lims")

    # field extent: explicit limits win over a point-cloud bounding box
    if lims is not None:
        mins = lims[:, 0]
        maxes = lims[:, 1]
    else:
        mins = np.min(points, axis=0)
        maxes = np.max(points, axis=0)

    delta = maxes - mins

    # Return a tuple, matching the annotated/documented return type
    # (previously an np.ndarray leaked out).
    return tuple(delta / dims)

MRI2FE.ants_affine(img)

Extract affine transformation matrix from ANTsImage.

Parameters:

Name Type Description Default
img ANTsImage

Input ANTs image

required

Raises:

Type Description
TypeError

If img is not an ANTsImage

ValueError

If img is None

Returns:

Type Description
ndarray

np.ndarray: 4x4 affine transformation matrix

Source code in src/MRI2FE/utilities.py
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
def ants_affine(img: ants.core.ants_image.ANTsImage) -> np.ndarray:
    """Build the 4x4 affine transformation matrix of an ANTs image.

    Args:
        img (ants.core.ants_image.ANTsImage): Input ANTs image

    Raises:
        TypeError: If img is not an ANTsImage
        ValueError: If img is None

    Returns:
        np.ndarray: 4x4 affine transformation matrix
    """
    # reject missing or wrongly typed input up front
    if img is None:
        raise ValueError("img cannot be None")
    if not isinstance(img, ants.core.ants_image.ANTsImage):
        raise TypeError("img must be an ANTsImage object")

    # image geometry: per-axis voxel size, orientation cosines, physical origin
    voxel_size = np.asarray(img.spacing)
    orientation = np.asarray(img.direction).reshape(3, 3)
    translation = np.asarray(img.origin)

    # assemble [ R*S | t ; 0 0 0 1 ]: scale each direction column by spacing,
    # place the origin in the translation column
    affine = np.eye(4)
    affine[:3, :3] = orientation * voxel_size
    affine[:3, 3] = translation

    return affine

MRI2FE.spatial_map(infile)

Convert data from NIFTI file to (n, 4) array of x,y,z,voxel value in physical space

Parameters:

Name Type Description Default
infile nifti1

NIFTI file to convert

required

Raises:

Type Description
TypeError

If infile is not an ANTsImage

ValueError

If infile is None

Returns:

Name Type Description
coordinates_and_values ndarray

array of x,y,z coordinates in physical space with the corresponding voxel value

Source code in src/MRI2FE/utilities.py
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
def spatial_map(infile: ants.core.ants_image.ANTsImage) -> np.ndarray:
    """Convert a NIFTI image to an (n, 4) array of x,y,z,voxel value in physical space

    Args:
        infile (ants.core.ants_image.ANTsImage): image to convert

    Raises:
        TypeError: If infile is not an ANTsImage
        ValueError: If infile is None

    Returns:
        coordinates_and_values (np.ndarray): array of x,y,z coordinates in physical space with the corresponding voxel value
    """
    # reject missing or wrongly typed input up front
    if infile is None:
        raise ValueError("infile cannot be None")
    if not isinstance(infile, ants.core.ants_image.ANTsImage):
        raise TypeError("infile must be an ANTsImage object")

    # raw voxel data and the voxel->physical affine (assumes a 3D image)
    data = infile.numpy()
    affine = ants_affine(infile)

    # full voxel-index grid flattened to (n, 3) in C order; np.indices uses
    # "ij" indexing, matching the original meshgrid construction
    voxel_idx = np.indices(data.shape).reshape(3, -1).T

    # homogeneous coordinates (n, 4) for the affine transform
    homogeneous = np.hstack(
        [voxel_idx, np.ones((voxel_idx.shape[0], 1))]
    )

    # map into physical space and pair each point with its rounded intensity
    physical = homogeneous @ affine.T
    values = np.round(data.ravel())

    return np.hstack([physical[:, :3], values[:, np.newaxis]])

MRI2FE.element_centroids(elnodes, node_coords)

Calculate the centroid of an element

Parameters:

Name Type Description Default
elnodes array

1D array containing EID, PID, and connected nodes

required
node_coords array

2D array containing NID, xyz coordinates

required

Raises:

Type Description
TypeError

If inputs are not numpy arrays

ValueError

If array dimensions or contents are invalid

Returns:

Name Type Description
centroid array

(3,) array containing average coordinate of the element

Source code in src/MRI2FE/utilities.py
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
def element_centroids(
    elnodes: Union[np.ndarray, tuple, list], node_coords: np.ndarray
) -> np.ndarray:
    """Calculate the centroid of an element

    Args:
        elnodes (np.array): 1D array containing EID, PID, and connected nodes
        node_coords (np.array): 2D array containing NID, xyz coordinates

    Raises:
        TypeError: If inputs are not numpy arrays
        ValueError: If array dimensions or contents are invalid

    Returns:
        centroid (np.array): (3,) array containing average coordinate of the element
    """
    # Validate input types.  (This validation was previously wrapped in a
    # stray triple-quoted string and therefore never executed.)
    if not isinstance(elnodes, (np.ndarray, tuple, list)):
        raise TypeError("elnodes must be a numpy array, tuple, or list")
    if not isinstance(node_coords, np.ndarray):
        raise TypeError("node_coords must be a numpy array")

    # Convert before shape checks so tuple/list inputs are handled
    elnodes = np.asarray(elnodes)

    # Validate array dimensions
    if elnodes.ndim != 1:
        raise ValueError("elnodes must be a 1D array")
    if node_coords.ndim != 2:
        raise ValueError("node_coords must be a 2D array")
    if node_coords.shape[1] != 4:  # Must have NID and xyz coordinates
        raise ValueError("node_coords must have 4 columns (NID, x, y, z)")

    # Entries 0 and 1 are EID and PID; the remainder are connected node IDs
    node_ids = np.unique(elnodes[2:])

    # Vectorized membership test replaces the per-ID mask loop
    mask = np.isin(node_coords[:, 0], node_ids)

    # Mean of the xyz columns over the element's nodes; return an ndarray as
    # annotated/documented (previously a Python list was returned)
    return np.mean(node_coords[mask, 1:], axis=0)