Commit 372bc2065d440c8adc3186da05c5d6893aec547c
Committed by: GitHub
Exists in: master
Merge pull request #432 from rmatsuda/fix-brain-mesh-peel-from-file
FIX: brain mesh peel from file
Showing 2 changed files with 24 additions and 21 deletions
invesalius/data/brainmesh_handler.py
@@ -7,17 +7,18 @@ import numpy as np
 
 import invesalius.data.slice_ as sl
 from invesalius.data.converters import to_vtk
-
+import invesalius.data.vtk_utils as vtk_utils
 
 class Brain:
-    def __init__(self, n_peels, window_width, window_level, affine_vtk=None):
+    def __init__(self, n_peels, window_width, window_level, affine, inv_proj):
         # Create arrays to access the peel data and peel Actors
         self.peel = []
         self.peelActors = []
         self.window_width = window_width
         self.window_level = window_level
         self.numberOfPeels = n_peels
-        self.affine_vtk = affine_vtk
+        self.affine = affine
+        self.inv_proj = inv_proj
 
     def from_mask(self, mask):
         mask= np.array(mask.matrix[1:, 1:, 1:])
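
Note (illustrative, not part of the diff): Brain now stores a plain numpy affine and the project (inv_proj) instead of a prebuilt vtkMatrix4x4, and converts on demand with vtk_utils.numpy_to_vtkMatrix4x4. A minimal sketch of what such a numpy-to-VTK conversion typically does, assuming a simple element-by-element copy (the actual helper in invesalius.data.vtk_utils may differ):

    import numpy as np
    import vtk

    def numpy_to_vtk_matrix4x4(affine):
        # Copy a 4x4 numpy array into a vtk.vtkMatrix4x4 (hypothetical helper).
        m = vtk.vtkMatrix4x4()
        for i in range(4):
            for j in range(4):
                m.SetElement(i, j, affine[i, j])
        return m

    # Example: an identity affine becomes an identity vtkMatrix4x4.
    print(numpy_to_vtk_matrix4x4(np.eye(4)))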
@@ -52,6 +53,7 @@ class Brain:
     def from_mask_file(self, mask_path):
         slic = sl.Slice()
         image = slic.matrix
+        image = np.flip(image, axis=1)
         image = to_vtk(image, spacing=slic.spacing)
 
         # Read the mask
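
Note (illustrative, not part of the diff): np.flip(image, axis=1) reverses the volume along its second (Y) axis before it is converted to VTK, so the image orientation matches the mask read from file. A toy example of what flipping axis 1 does:

    import numpy as np

    vol = np.arange(8).reshape(2, 2, 2)   # toy 2x2x2 "volume"
    flipped = np.flip(vol, axis=1)        # reverse the second (Y) axis
    assert np.array_equal(flipped[:, 0, :], vol[:, 1, :])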
@@ -69,13 +71,10 @@ class Brain:
         self._do_surface_creation(mask, mask_sFormMatrix)
 
 
-    def _do_surface_creation(self, mask, mask_sFormMatrix=None, qFormMatrix=None):
+    def _do_surface_creation(self, mask, mask_sFormMatrix=None):
         if mask_sFormMatrix is None:
             mask_sFormMatrix = vtk.vtkMatrix4x4()
 
-        if qFormMatrix is None:
-            qFormMatrix = vtk.vtkMatrix4x4()
-
         value = np.mean(mask.GetScalarRange())
 
         # Use the mask to create isosurface
@@ -115,14 +114,13 @@ class Brain:
         tmpPeel = cleanMesh(tmpPeel)
 
         refImageSpace2_xyz_transform = vtk.vtkTransform()
-        refImageSpace2_xyz_transform.SetMatrix(qFormMatrix)
+        refImageSpace2_xyz_transform.SetMatrix(vtk_utils.numpy_to_vtkMatrix4x4(np.linalg.inv(self.affine)))
 
         self.refImageSpace2_xyz = vtk.vtkTransformPolyDataFilter()
         self.refImageSpace2_xyz.SetTransform(refImageSpace2_xyz_transform)
 
         xyz2_refImageSpace_transform = vtk.vtkTransform()
-        qFormMatrix.Invert()
-        xyz2_refImageSpace_transform.SetMatrix(qFormMatrix)
+        xyz2_refImageSpace_transform.SetMatrix(vtk_utils.numpy_to_vtkMatrix4x4(self.affine))
 
         self.xyz2_refImageSpace = vtk.vtkTransformPolyDataFilter()
         self.xyz2_refImageSpace.SetTransform(xyz2_refImageSpace_transform)
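
Note (illustrative, not part of the diff): both directions of the mapping between reference-image space and xyz space are now derived from the stored numpy affine (one via np.linalg.inv, one as-is) instead of inverting a qFormMatrix in place. A small numpy-only sketch of that round trip, with a made-up affine:

    import numpy as np

    # Hypothetical reference-image affine: identity rotation plus a translation.
    affine = np.eye(4)
    affine[:3, 3] = [-90.0, -126.0, -72.0]

    xyz2_ref = affine                    # one direction of the mapping
    ref2_xyz = np.linalg.inv(affine)     # the other direction

    point = np.array([10.0, 20.0, 30.0, 1.0])
    mapped = ref2_xyz @ point
    assert np.allclose(xyz2_ref @ mapped, point)   # round-trips back to the start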
@@ -138,14 +136,22 @@ class Brain:
         self.peel_centers = vtk.vtkFloatArray()
         self.peel.append(newPeel)
         self.currentPeelActor = vtk.vtkActor()
-        if self.affine_vtk:
-            self.currentPeelActor.SetUserMatrix(self.affine_vtk)
+        if not np.all(np.equal(self.affine, np.eye(4))):
+            affine_vtk = self.CreateTransformedVTKAffine()
+            self.currentPeelActor.SetUserMatrix(affine_vtk)
         self.GetCurrentPeelActor(currentPeel)
         self.peelActors.append(self.currentPeelActor)
         # locator will later find the triangle on the peel surface where the coil's normal intersect
         self.locator = vtk.vtkCellLocator()
         self.PeelDown(currentPeel)
 
+    def CreateTransformedVTKAffine(self):
+        affine_transformed = self.affine.copy()
+        matrix_shape = tuple(self.inv_proj.matrix_shape)
+        affine_transformed[1, -1] -= matrix_shape[1]
+
+        return vtk_utils.numpy_to_vtkMatrix4x4(affine_transformed)
+
     def get_actor(self, n):
         return self.GetPeelActor(n)
 
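
Note (illustrative, not part of the diff): CreateTransformedVTKAffine shifts the affine's Y translation by the volume's Y extent before converting it to VTK, which pairs with the np.flip(image, axis=1) added in from_mask_file. A toy illustration of the arithmetic, with made-up values standing in for self.affine and inv_proj.matrix_shape:

    import numpy as np

    affine = np.eye(4)
    affine[1, -1] = 40.0                  # hypothetical Y translation
    matrix_shape = (256, 256, 160)        # hypothetical volume shape

    affine_transformed = affine.copy()
    affine_transformed[1, -1] -= matrix_shape[1]   # shift Y translation by the Y extent

    print(affine[1, -1], affine_transformed[1, -1])   # 40.0 -216.0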
@@ -216,8 +222,9 @@ class Brain:
 
     def TransformPeelPosition(self, p):
         peel_transform = vtk.vtkTransform()
-        if self.affine_vtk:
-            peel_transform.SetMatrix(self.affine_vtk)
+        if not np.all(np.equal(self.affine, np.eye(4))):
+            affine_vtk = self.CreateTransformedVTKAffine()
+            peel_transform.SetMatrix(affine_vtk)
         refpeelspace = vtk.vtkTransformPolyDataFilter()
         refpeelspace.SetInputData(self.peel[p])
         refpeelspace.SetTransform(peel_transform)
invesalius/gui/task_navigator.py
@@ -2023,18 +2023,14 @@ class TractographyPanel(wx.Panel):
         slic = sl.Slice()
         ww = slic.window_width
         wl = slic.window_level
-        affine_vtk = vtk.vtkMatrix4x4()
-
+        affine = np.eye(4)
         if method == peels_dlg.FROM_FILES:
-            matrix_shape = tuple(inv_proj.matrix_shape)
             try:
                 affine = slic.affine.copy()
             except AttributeError:
-                affine = np.eye(4)
-            affine[1, -1] -= matrix_shape[1]
-            affine_vtk = vtk_utils.numpy_to_vtkMatrix4x4(affine)
+                pass
 
-        self.brain_peel = brain.Brain(self.n_peels, ww, wl, affine_vtk)
+        self.brain_peel = brain.Brain(self.n_peels, ww, wl, affine, inv_proj)
         if method == peels_dlg.FROM_MASK:
             choices = [i for i in inv_proj.mask_dict.values()]
             mask_index = peels_dlg.cb_masks.GetSelection()
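
Note (illustrative, not part of the diff): the panel now keeps the affine as a numpy array, falling back to the identity when the slice has no affine attribute, and lets Brain decide internally whether a user matrix is needed. A minimal sketch of that fallback pattern with a stand-in object:

    import numpy as np

    class _FakeSlice:
        pass  # no .affine attribute, mimicking a project loaded without an affine

    slic = _FakeSlice()
    affine = np.eye(4)
    try:
        affine = slic.affine.copy()
    except AttributeError:
        pass

    # True: Brain will skip SetUserMatrix because the affine is the identity.
    print(np.array_equal(affine, np.eye(4)))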