-
Notifications
You must be signed in to change notification settings - Fork 10
Expand file tree
/
Copy pathimageunit.py
More file actions
227 lines (196 loc) · 9.71 KB
/
imageunit.py
File metadata and controls
227 lines (196 loc) · 9.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
# author : AC Chamberlain <[email protected]>
# copyright: AC Chamberlain (c) 2023-2026
# SPDX-License-Identifier: Licence.txt:
import math
import numpy as np
from pydicom import Dataset
from decorators import check_values_exist
from linaqa_types import supported_modalities
class Imager:
    """Present a list of DICOM datasets as a single 3D voxel volume.

    The voxel data is held in ``self.values`` as an int32 array of shape
    (rows, columns, slices).  The class supports window/level display
    mapping (``get_image`` returns a packed RGB32 array suitable for
    QImage), greyscale inversion, and whole-stack operations (flip,
    sum, average, scale).
    """

    def __init__(self, datasets: list[Dataset], use_rescale: bool = False):
        """Build the volume from *datasets*.

        :param datasets: non-empty list of pydicom Datasets, all the same
            image size (a multi-frame file may be passed as a single dataset).
        :param use_rescale: if True, apply RescaleSlope/RescaleIntercept
            when windowing and auto-windowing.
        """
        self.datasets = datasets
        self.values = None          # int32 voxel data, shape (rows, cols, slices)
        self._index = 0             # currently displayed slice
        self.rescale = use_rescale
        self._window_width = 1000
        self._window_center = 0
        self._invflag = False       # greyscale inversion flag
        # check if dataset has an image
        if (datasets[0].Modality in supported_modalities) and hasattr(datasets[0], "PixelData"):
            # Dataset holds a 3D volume in a single multi-frame file
            if hasattr(datasets[0], "NumberOfFrames") and (int(datasets[0].NumberOfFrames) > 1):
                self.size = (int(datasets[0].Rows), int(datasets[0].Columns), int(datasets[0].NumberOfFrames))
            # Datasets are a stack of 2D planes, one per file
            else:
                self.size = (int(datasets[0].Rows), int(datasets[0].Columns), len(datasets))
            # CT 3D dataset: in-plane spacing plus slice thickness
            if (hasattr(datasets[0], "PixelSpacing") and
                    hasattr(datasets[0], "SliceThickness") and
                    (datasets[0].SliceThickness is not None) and
                    (datasets[0].SliceThickness != "")):
                self.spacings = (float(datasets[0].PixelSpacing[0]),
                                 float(datasets[0].PixelSpacing[1]),
                                 float(datasets[0].SliceThickness))
            # 2D dataset (e.g. portal image): unit slice spacing
            elif hasattr(datasets[0], "ImagePlanePixelSpacing"):
                self.spacings = (float(datasets[0].ImagePlanePixelSpacing[0]),
                                 float(datasets[0].ImagePlanePixelSpacing[1]),
                                 float(1))
            else:
                self.spacings = (1, 1, 1)
            # start display in the middle of the stack
            self._index = int(self.size[2] / 2)
            # physical coordinate axes along each dimension
            self.axes = (np.arange(0.0, (self.size[0] + 1) * self.spacings[0], self.spacings[0]),
                         np.arange(0.0, (self.size[1] + 1) * self.spacings[1], self.spacings[1]),
                         np.arange(0.0, (self.size[2] + 1) * self.spacings[2], self.spacings[2]))
            # Load pixel data
            self.load_pixel_data(datasets)
            self.auto_window()

    def load_pixel_data(self, datasets):
        """Copy the pixel data of *datasets* into ``self.values`` (int32)."""
        # standard set of 2D greyscale images, one slice per dataset
        if datasets[0].pixel_array.ndim == 2:
            self.values = np.zeros(self.size, dtype="int32")
            for i, d in enumerate(datasets):
                # 'unsafe' casting since pydicom may deliver float64 pixel data
                np.copyto(self.values[:, :, i], d.pixel_array, 'unsafe')
        # colour image, 3 RGB samples per pixel
        elif (datasets[0].pixel_array.ndim == 3) and (hasattr(datasets[0], "SamplesPerPixel")) and (datasets[0].SamplesPerPixel == 3):
            self.values = np.zeros((self.size[0], self.size[1], len(datasets)), dtype='int32')
            for i, d in enumerate(datasets):
                # Convert RGB image to greyscale with the ITU-R BT.601 luma weights
                np.copyto(self.values[:, :, i], np.dot(d.pixel_array[..., :3], [0.2989, 0.5870, 0.1140]), 'unsafe')
        # multi-frame image or 3D image: reorder frames-first -> slices-last
        elif datasets[0].pixel_array.ndim == 3:
            self.values = datasets[0].pixel_array.transpose(1, 2, 0)

    @property
    def index(self):
        """Index of the currently displayed slice."""
        return self._index

    @index.setter
    def index(self, value):
        # clamp to the valid slice range [0, size[2] - 1]
        if value < 0:
            value = 0
        if value >= self.size[2]:
            value = self.size[2] - 1
        self._index = value

    @property
    def window_width(self):
        """Display window width; always >= 1 to avoid divide-by-zero."""
        return self._window_width

    @window_width.setter
    def window_width(self, value):
        self._window_width = max(value, 1)

    @property
    def window_center(self):
        """Display window centre (level)."""
        return self._window_center

    @window_center.setter
    def window_center(self, value):
        self._window_center = value

    @property
    def invflag(self):
        """True when the greyscale display is inverted."""
        return self._invflag

    @invflag.setter
    def invflag(self, value: bool):
        self._invflag = value

    @check_values_exist
    def get_image(self, index):
        """Return slice *index* windowed and packed as RGB32 (0xAARRGGBB) uint32."""
        # int32 true values (HU or brightness units)
        img = self.values[:, :, index]
        if (self.rescale and hasattr(self.datasets[index], 'RescaleIntercept')
                and hasattr(self.datasets[index], 'RescaleSlope')):
            intercept = float(self.datasets[index].RescaleIntercept)
            slope = float(self.datasets[index].RescaleSlope)
            img = img * slope + intercept
        # Vectorized windowing using boolean masks
        w_left = (self._window_center - self._window_width / 2)
        w_right = (self._window_center + self._window_width / 2)
        mask_0 = img < w_left                  # below the window
        mask_1 = img > w_right                 # above the window
        mask_2 = np.invert(mask_0 + mask_1)    # inside the window: linear ramp
        # Cast to an RGB image so that QImage can handle it
        rgb_array = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint32)
        if self._invflag:
            rgb_array[:, :, 0] = rgb_array[:, :, 1] = rgb_array[:, :, 2] = \
                mask_1 * 0 + mask_0 * 255 + mask_2 * (255 * (w_right - img) / (w_right - w_left))
        else:
            rgb_array[:, :, 0] = rgb_array[:, :, 1] = rgb_array[:, :, 2] = \
                mask_0 * 0 + mask_1 * 255 + mask_2 * (255 * (img - w_left) / (w_right - w_left))
        # flatten RGB array to RGB32 (opaque alpha in the top byte)
        res = (255 << 24 | rgb_array[:, :, 0] << 16 | rgb_array[:, :, 1] << 8 | rgb_array[:, :, 2])
        return res

    def get_current_image(self):
        """Return the windowed RGB32 image for the current slice."""
        return self.get_image(self.index)

    @check_values_exist
    def auto_window(self):
        """Set window width/centre to span the full range of ``self.values``."""
        win_max = np.max(self.values)
        win_min = np.min(self.values)
        if (self.rescale and hasattr(self.datasets[self.index], 'RescaleIntercept')
                and hasattr(self.datasets[self.index], 'RescaleSlope')):
            intercept = float(self.datasets[self.index].RescaleIntercept)
            slope = float(self.datasets[self.index].RescaleSlope)
            win_max = win_max * slope + intercept
            win_min = win_min * slope + intercept
        # BUG FIX: clamp to >= 1 here too.  The original assigned the backing
        # field directly, bypassing the window_width setter, so a constant-valued
        # image produced a zero width and a divide-by-zero in get_image().
        self._window_width = max(win_max - win_min, 1)
        self._window_center = (win_max + win_min) // 2

    @check_values_exist
    def flip_lr(self):
        """Flip every slice left-right (mirror about the vertical axis)."""
        self.values = np.fliplr(self.values)

    @check_values_exist
    def flip_ud(self):
        """Flip every slice top-bottom (mirror about the horizontal axis)."""
        self.values = np.flipud(self.values)

    @check_values_exist
    def sum_images(self):
        """Collapse the stack into a single summed image stored in datasets[0].

        Each slice is rescaled to calibrated units, the slices are summed,
        and the result is renormalized into the uint16 range with a new
        rescale slope/intercept written back to the surviving dataset.
        """
        if self.values.ndim == 3:
            # create floating point matrix same size as values
            fpvalues = np.array(self.values, dtype=float)
            # for each image rescale pixel values to calibrated units.
            for i, ds in enumerate(self.datasets):
                slope = ds.RescaleSlope if hasattr(ds, 'RescaleSlope') else 1
                intercept = ds.RescaleIntercept if hasattr(ds, 'RescaleIntercept') else 0
                # BUG FIX: the +/-1 sign is stored in PixelIntensityRelationshipSign.
                # The original read PixelIntensityRelationship, which holds a string
                # ('LIN'/'LOG') that can never compare equal to 1, so the positive
                # branch below was unreachable whenever the attribute was present.
                # (This method itself writes int(sign) to ...RelationshipSign.)
                sign = ds.PixelIntensityRelationshipSign \
                    if hasattr(ds, 'PixelIntensityRelationshipSign') \
                    else math.copysign(1, slope)
                fpvalues[:, :, i] = fpvalues[:, :, i] * slope + intercept
            image_sum = np.sum(fpvalues, axis=2)
            if sign == 1:  # if sign=1 pixel values increase with x-ray intensity
                # get slope to rescale values to max uint16
                intercept = np.min(image_sum)
                slope = (np.max(image_sum) - intercept) / np.iinfo(np.uint16).max
            else:  # if sign = -1 pixel values decrease with x-ray intensity
                intercept = np.max(image_sum)
                slope = (np.min(image_sum) - intercept) / np.iinfo(np.uint16).max
            if slope == 0:
                # constant summed image: keep it constant instead of dividing by zero
                slope = 1
            image_sum = (image_sum - intercept) / slope
            self.datasets[0].PixelData = image_sum.astype(np.uint16, casting='unsafe').tobytes()
            self.size = (int(self.datasets[0].Rows), int(self.datasets[0].Columns), 1)
            self.values = image_sum.reshape(int(self.datasets[0].Rows), int(self.datasets[0].Columns), 1)
            # A rescale relationship must be established even if it didn't exist before
            if not hasattr(self.datasets[0], 'PixelIntensityRelationship'):
                self.datasets[0].PixelIntensityRelationship = 'LIN'
            if not hasattr(self.datasets[0], 'PixelIntensityRelationshipSign'):
                self.datasets[0].PixelIntensityRelationshipSign = int(sign)
            self.datasets[0].RescaleSlope = slope
            self.datasets[0].RescaleIntercept = intercept
            self.datasets[0].RescaleType = 'CU'
            # keep only the composite dataset; del by slice is identity-safe,
            # unlike list.remove() which deletes the first *equal* element
            del self.datasets[1:]
            self.index = 0
            self.auto_window()

    @check_values_exist
    def avg_images(self):
        """Collapse the stack into a single averaged image stored in datasets[0]."""
        if self.values.ndim == 3:
            image_sum = np.sum(self.values, axis=2)
            image_sum = image_sum / self.size[2]
            self.datasets[0].PixelData = image_sum.astype(np.uint16, casting='unsafe').tobytes()
            self.size = (int(self.datasets[0].Rows), int(self.datasets[0].Columns), 1)
            self.values = image_sum.reshape(int(self.datasets[0].Rows), int(self.datasets[0].Columns), 1)
            # keep only the composite dataset (identity-safe, see sum_images)
            del self.datasets[1:]
            self.index = 0
            self.auto_window()

    @check_values_exist
    def scale_images(self, factor: float):
        """Multiply all pixel values by *factor* and write them back to PixelData."""
        self.values = self.values * factor
        if self.datasets[0].pixel_array.ndim == 3:
            # single multi-frame dataset: write the whole volume back at once
            self.datasets[0].PixelData = self.values.astype(np.uint16, casting='unsafe').tobytes()
        else:
            # one 2D dataset per slice
            for i, image in enumerate(self.datasets):
                image.PixelData = self.values[:, :, i].astype(np.uint16, casting='unsafe').tobytes()
        self.auto_window()