Upload 4 files
data.py
ADDED
@@ -0,0 +1,46 @@
# Copyright (c) Owkin, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Tuple

import numpy as np
import torch

import datasets


class SlideFeaturesDataset:
    """Slide features dataset."""

    def __init__(self, *args, **kwargs):
        self.hf_dataset = datasets.load_dataset(*args, **kwargs).with_format("torch")

    def __getitem__(self, item: np.int64) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        item: np.int64
            Index of item, will be converted to int.

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor]
            (1000, 768), (1,)
        """
        return (
            self.hf_dataset[int(item)]["features"],
            self.hf_dataset[int(item)]["label"].unsqueeze(0).float(),
        )

    def __len__(self) -> int:
        return len(self.hf_dataset)

    @property
    def labels(self):
        return self.hf_dataset["label"]
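Usage sketch (not part of the commit): SlideFeaturesDataset simply forwards its arguments to datasets.load_dataset and expects "features" and "label" columns; the dataset path and split below are placeholders.

# Hypothetical example -- the dataset path and split are placeholders.
from data import SlideFeaturesDataset

dataset = SlideFeaturesDataset("org/slide-features", split="train")  # args go to datasets.load_dataset
features, label = dataset[0]   # features: (N_TILES, F) float tensor, label: (1,) float tensor
print(len(dataset), features.shape, label.shape)
all_labels = dataset.labels    # full label column, e.g. for stratified splitting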
module.py
ADDED
@@ -0,0 +1,484 @@
# Copyright (c) Owkin, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import warnings
from typing import List, Optional, Tuple, Union

import torch
from torch import nn


class MLP(torch.nn.Sequential):
    """MLP Module.

    Parameters
    ----------
    in_features: int
        Features (model input) dimension.
    out_features: int = 1
        Prediction (model output) dimension.
    hidden: Optional[List[int]] = None
        Dimension of hidden layer(s).
    dropout: Optional[List[float]] = None
        Dropout rate(s).
    activation: Optional[torch.nn.Module] = torch.nn.Sigmoid()
        MLP activation.
    bias: bool = True
        Add bias to MLP hidden layers.

    Raises
    ------
    ValueError
        If ``hidden`` and ``dropout`` do not share the same length.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        hidden: Optional[List[int]] = None,
        dropout: Optional[List[float]] = None,
        activation: Optional[torch.nn.Module] = torch.nn.Sigmoid(),
        bias: bool = True,
    ):
        if dropout is not None:
            if hidden is not None:
                assert len(hidden) == len(
                    dropout
                ), "hidden and dropout must have the same length"
            else:
                raise ValueError(
                    "hidden must have a value and have the same length as dropout if dropout is given."
                )

        d_model = in_features
        layers = []

        if hidden is not None:
            for i, h in enumerate(hidden):
                seq = [torch.nn.Linear(d_model, h, bias=bias)]
                d_model = h

                if activation is not None:
                    seq.append(activation)

                if dropout is not None:
                    seq.append(torch.nn.Dropout(dropout[i]))

                layers.append(torch.nn.Sequential(*seq))

        layers.append(torch.nn.Linear(d_model, out_features))

        super(MLP, self).__init__(*layers)


class MaskedLinear(torch.nn.Linear):
    """Linear layer to be applied tile-wise.

    This layer can be used in combination with a mask
    to prevent padding tiles from influencing the values of a subsequent
    activation.

    Example:
        >>> module = Linear(in_features=128, out_features=1)  # With Linear
        >>> out = module(slide)
        >>> wrong_value = torch.sigmoid(out)  # Value is influenced by padding
        >>> module = MaskedLinear(in_features=128, out_features=1, mask_value='-inf')  # With MaskedLinear
        >>> out = module(slide, mask)  # Padding now has the '-inf' value
        >>> correct_value = torch.sigmoid(out)  # Value is not influenced by padding as sigmoid('-inf') = 0

    Parameters
    ----------
    in_features: int
        Size of each input sample.
    out_features: int
        Size of each output sample.
    mask_value: Union[str, float]
        Value used to fill the masked (padded) positions.
    bias: bool = True
        If set to ``False``, the layer will not learn an additive bias.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        mask_value: Union[str, float],
        bias: bool = True,
    ):
        super(MaskedLinear, self).__init__(
            in_features=in_features, out_features=out_features, bias=bias
        )
        self.mask_value = mask_value

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.BoolTensor] = None
    ):  # pylint: disable=arguments-renamed
        """Forward pass.

        Parameters
        ----------
        x: torch.Tensor
            Input tensor, shape (B, SEQ_LEN, IN_FEATURES).
        mask: Optional[torch.BoolTensor] = None
            True for values that were padded, shape (B, SEQ_LEN, 1).

        Returns
        -------
        x: torch.Tensor
            (B, SEQ_LEN, OUT_FEATURES)
        """
        x = super(MaskedLinear, self).forward(x)
        if mask is not None:
            x = x.masked_fill(mask, float(self.mask_value))
        return x

    def extra_repr(self):
        return (
            f"in_features={self.in_features}, out_features={self.out_features}, "
            f"mask_value={self.mask_value}, bias={self.bias is not None}"
        )


class TilesMLP(torch.nn.Module):
    """MLP to be applied to tiles to compute scores.

    This module can be used in combination with a mask
    to prevent padding from influencing the score values.

    Parameters
    ----------
    in_features: int
        Size of each input sample.
    out_features: int
        Size of each output sample.
    hidden: Optional[List[int]] = None
        Number of hidden layers and their respective number of features.
    bias: bool = True
        If set to ``False``, the layer will not learn an additive bias.
    activation: torch.nn.Module = torch.nn.Sigmoid()
        MLP activation function.
    dropout: Optional[torch.nn.Module] = None
        Optional dropout module. Will be interlaced with the linear layers.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int = 1,
        hidden: Optional[List[int]] = None,
        bias: bool = True,
        activation: torch.nn.Module = torch.nn.Sigmoid(),
        dropout: Optional[torch.nn.Module] = None,
    ):
        super(TilesMLP, self).__init__()

        self.hidden_layers = torch.nn.ModuleList()
        if hidden is not None:
            for h in hidden:
                self.hidden_layers.append(
                    MaskedLinear(in_features, h, bias=bias, mask_value="-inf")
                )
                self.hidden_layers.append(activation)
                if dropout:
                    self.hidden_layers.append(dropout)
                in_features = h

        self.hidden_layers.append(
            torch.nn.Linear(in_features, out_features, bias=bias)
        )

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.BoolTensor] = None
    ):
        """Forward pass.

        Parameters
        ----------
        x: torch.Tensor
            (B, N_TILES, IN_FEATURES)
        mask: Optional[torch.BoolTensor] = None
            (B, N_TILES), True for values that were padded.

        Returns
        -------
        x: torch.Tensor
            (B, N_TILES, OUT_FEATURES)
        """
        for layer in self.hidden_layers:
            if isinstance(layer, MaskedLinear):
                x = layer(x, mask)
            else:
                x = layer(x)
        return x


class ExtremeLayer(torch.nn.Module):
    """Extreme layer.

    Returns the concatenation of the ``n_top`` top tiles and ``n_bottom`` bottom tiles.

    .. warning::
        If ``n_top`` or ``n_bottom`` is larger than the true number of
        tiles in the input, padded tiles will be selected and their value
        will be 0.

    Parameters
    ----------
    n_top: Optional[int] = None
        Number of top tiles to select.
    n_bottom: Optional[int] = None
        Number of bottom tiles to select.
    dim: int = 1
        Dimension to select top/bottom tiles from.
    return_indices: bool = False
        Whether to return the indices of the extreme tiles.

    Raises
    ------
    ValueError
        If both ``n_top`` and ``n_bottom`` are ``None``, or neither is strictly positive.
    """

    def __init__(
        self,
        n_top: Optional[int] = None,
        n_bottom: Optional[int] = None,
        dim: int = 1,
        return_indices: bool = False,
    ):
        super(ExtremeLayer, self).__init__()

        if not (n_top is not None or n_bottom is not None):
            raise ValueError("one of n_top or n_bottom must have a value.")

        if not (
            (n_top is not None and n_top > 0)
            or (n_bottom is not None and n_bottom > 0)
        ):
            raise ValueError("one of n_top or n_bottom must have a value > 0.")

        self.n_top = n_top
        self.n_bottom = n_bottom
        self.dim = dim
        self.return_indices = return_indices

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.BoolTensor] = None
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """Forward pass.

        Parameters
        ----------
        x: torch.Tensor
            Input tensor, shape (B, N_TILES, IN_FEATURES).
        mask: Optional[torch.BoolTensor]
            True for values that were padded, shape (B, N_TILES, 1).

        Warnings
        --------
        If ``n_top`` or ``n_bottom`` is larger than the true number of tiles in
        the input, padded tiles will be selected and their value will be 0.

        Returns
        -------
        values: torch.Tensor
            Extreme tiles, shape (B, N_TOP + N_BOTTOM).
        indices: torch.Tensor
            If ``self.return_indices=True``, return extreme tiles' indices.
        """

        if (
            self.n_top
            and self.n_bottom
            and ((self.n_top + self.n_bottom) > x.shape[self.dim])
        ):
            warnings.warn(
                f"Sum of tops is larger than the input tensor shape for dimension {self.dim}: "
                f"{self.n_top + self.n_bottom} > {x.shape[self.dim]}. "
                f"Values will appear twice (in top and in bottom)."
            )

        top, bottom = None, None
        top_idx, bottom_idx = None, None
        if mask is not None:
            if self.n_top:
                top, top_idx = x.masked_fill(mask, float("-inf")).topk(
                    k=self.n_top, sorted=True, dim=self.dim
                )
                top_mask = top.eq(float("-inf"))
                if top_mask.any():
                    warnings.warn(
                        "The top tiles contain masked values, they will be set to zero."
                    )
                    top[top_mask] = 0

            if self.n_bottom:
                bottom, bottom_idx = x.masked_fill(mask, float("inf")).topk(
                    k=self.n_bottom, largest=False, sorted=True, dim=self.dim
                )
                bottom_mask = bottom.eq(float("inf"))
                if bottom_mask.any():
                    warnings.warn(
                        "The bottom tiles contain masked values, they will be set to zero."
                    )
                    bottom[bottom_mask] = 0
        else:
            if self.n_top:
                top, top_idx = x.topk(k=self.n_top, sorted=True, dim=self.dim)
            if self.n_bottom:
                bottom, bottom_idx = x.topk(
                    k=self.n_bottom, largest=False, sorted=True, dim=self.dim
                )

        if top is not None and bottom is not None:
            values = torch.cat([top, bottom], dim=self.dim)
            indices = torch.cat([top_idx, bottom_idx], dim=self.dim)
        elif top is not None:
            values = top
            indices = top_idx
        elif bottom is not None:
            values = bottom
            indices = bottom_idx
        else:
            raise ValueError

        if self.return_indices:
            return values, indices
        else:
            return values

    def extra_repr(self) -> str:
        """Format representation."""
        return f"n_top={self.n_top}, n_bottom={self.n_bottom}"


class Chowder(nn.Module):
    """Chowder MIL model (see [1]_).

    Example:
        >>> module = Chowder(in_features=128, out_features=1, n_top=5, n_bottom=5)
        >>> logits, extreme_scores = module(slide, mask=mask)
        >>> scores = module.score_model(slide, mask=mask)

    Parameters
    ----------
    in_features: int
        Features (model input) dimension.
    out_features: int
        Controls the number of scores and, by extension, the number of out_features.
    n_top: int
        Number of tiles with the highest scores that are selected and fed to the MLP.
    n_bottom: int
        Number of tiles with the lowest scores that are selected and fed to the MLP.
    tiles_mlp_hidden: Optional[List[int]] = None
        Number of units for layers in the first MLP applied tile-wise to compute
        a score for each tile from the tile features.
        If `None`, a linear layer is used to compute tile scores.
        If e.g. `[128, 64]`, the tile scores are computed with an MLP of dimension
        features_dim -> 128 -> 64 -> 1.
    mlp_hidden: Optional[List[int]] = None
        Number of units for layers of the second MLP that combines top and bottom
        scores and outputs a final prediction at the slide level. If `None`, a
        linear layer is used to compute the prediction from the extreme scores.
        If e.g. `[128, 64]`, the prediction is computed
        with an MLP n_top + n_bottom -> 128 -> 64 -> 1.
    mlp_dropout: Optional[List[float]] = None
        Dropout that is used for each layer of the MLP. If `None`, no dropout
        is used.
    mlp_activation: Optional[torch.nn.Module] = torch.nn.Sigmoid()
        Activation that is used after each layer of the MLP.
    bias: bool = True
        Whether to add bias for layers of the tiles MLP.

    References
    ----------
    .. [1] Pierre Courtiol, Eric W. Tramel, Marc Sanselme, and Gilles Wainrib. Classification
    and disease localization in histopathology using only global labels: A weakly-supervised
    approach. CoRR, abs/1802.02212, 2018.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        n_top: Optional[int] = None,
        n_bottom: Optional[int] = None,
        tiles_mlp_hidden: Optional[List[int]] = None,
        mlp_hidden: Optional[List[int]] = None,
        mlp_dropout: Optional[List[float]] = None,
        mlp_activation: Optional[torch.nn.Module] = torch.nn.Sigmoid(),
        bias: bool = True,
    ) -> None:
        super(Chowder, self).__init__()
        if n_top is None and n_bottom is None:
            raise ValueError(
                "At least one of `n_top` or `n_bottom` must not be None."
            )

        if mlp_dropout is not None:
            if mlp_hidden is not None:
                assert len(mlp_hidden) == len(
                    mlp_dropout
                ), "mlp_hidden and mlp_dropout must have the same length"
            else:
                raise ValueError(
                    "mlp_hidden must have a value and have the same length as mlp_dropout if mlp_dropout is given."
                )

        self.score_model = TilesMLP(
            in_features,
            hidden=tiles_mlp_hidden,
            bias=bias,
            out_features=out_features,
        )
        self.score_model.apply(self.weight_initialization)

        self.extreme_layer = ExtremeLayer(n_top=n_top, n_bottom=n_bottom)

        mlp_in_features = n_top + n_bottom
        self.mlp = MLP(
            mlp_in_features,
            1,
            hidden=mlp_hidden,
            dropout=mlp_dropout,
            activation=mlp_activation,
        )
        self.mlp.apply(self.weight_initialization)

    @staticmethod
    def weight_initialization(module: torch.nn.Module) -> None:
        """Initialize weights for the module using the Xavier initialization method,
        "Understanding the difficulty of training deep feedforward neural networks",
        Glorot, X. & Bengio, Y. (2010)."""
        if isinstance(module, torch.nn.Linear):
            torch.nn.init.xavier_uniform_(module.weight)

            if module.bias is not None:
                module.bias.data.fill_(0.0)

    def forward(
        self, features: torch.Tensor, mask: Optional[torch.BoolTensor] = None
    ) -> torch.Tensor:
        """
        Parameters
        ----------
        features: torch.Tensor
            (B, N_TILES, IN_FEATURES)
        mask: Optional[torch.BoolTensor] = None
            (B, N_TILES, 1), True for values that were padded.

        Returns
        -------
        logits, extreme_scores: Tuple[torch.Tensor, torch.Tensor]:
            (B, OUT_FEATURES), (B, N_TOP + N_BOTTOM, OUT_FEATURES)
        """
        scores = self.score_model(x=features[..., 3:], mask=mask)
        extreme_scores = self.extreme_layer(
            x=scores, mask=mask
        )  # (B, N_TOP + N_BOTTOM, OUT_FEATURES)

        # Apply MLP to the N_TOP + N_BOTTOM scores.
        y = self.mlp(extreme_scores.transpose(1, 2))  # (B, OUT_FEATURES, 1)

        return y.squeeze(2)
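A quick smoke test for Chowder on random tensors (not part of the commit); the batch shape is illustrative, and the three leading feature columns dropped by forward() are assumed to hold tile coordinates.

# Hypothetical smoke test -- batch shape and the 3 coordinate columns are assumptions.
import torch
from module import Chowder

B, N_TILES = 2, 1000
features = torch.randn(B, N_TILES, 3 + 768)              # 3 coordinate columns + 768-d tile features
mask = torch.zeros(B, N_TILES, 1, dtype=torch.bool)      # True marks padded tiles
mask[:, 900:, :] = True                                  # pretend the last 100 tiles are padding

model = Chowder(in_features=768, out_features=1, n_top=5, n_bottom=5,
                mlp_hidden=[128, 64], mlp_dropout=[0.1, 0.1])
logits = model(features, mask)                           # (B, 1) slide-level logits
tile_scores = model.score_model(features[..., 3:], mask) # (B, N_TILES, 1) tile scores
print(logits.shape, tile_scores.shape)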
trainer.py
ADDED
@@ -0,0 +1,440 @@
# Copyright (c) Owkin, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader, Subset


def slide_level_train_step(
    model: torch.nn.Module,
    train_dataloader: torch.utils.data.DataLoader,
    criterion: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    device: str = "cpu",
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Training step for slide-level experiments. This will serve as the
    ``train_step`` in the ``TorchTrainer`` class.

    Parameters
    ----------
    model: nn.Module
        The PyTorch model to be trained.
    train_dataloader: torch.utils.data.DataLoader
        Training data loader.
    criterion: nn.Module
        The loss criterion used for training.
    optimizer: torch.optim.Optimizer
        The optimizer instance to use.
    device: str = "cpu"
        The device to use for training and evaluation.
    """
    model.train()

    _epoch_loss, _epoch_logits, _epoch_labels = [], [], []

    for batch in train_dataloader:
        # Get data.
        features, mask, labels = batch

        # Put on device.
        features = features.to(device)
        mask = mask.to(device)
        labels = labels.to(device)

        # Compute logits and loss.
        logits = model(features, mask)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # Stack logits & labels to compute epoch metrics.
        _epoch_loss.append(loss.detach().cpu().numpy())
        _epoch_logits.append(logits.detach())
        _epoch_labels.append(labels.detach())

    _epoch_loss = np.mean(_epoch_loss)
    _epoch_logits = torch.cat(_epoch_logits, dim=0).cpu().numpy()
    _epoch_labels = torch.cat(_epoch_labels, dim=0).cpu().numpy()

    return _epoch_loss, _epoch_logits, _epoch_labels


def slide_level_val_step(
    model: torch.nn.Module,
    val_dataloader: torch.utils.data.DataLoader,
    criterion: torch.nn.Module,
    device: str,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Inference step for slide-level experiments. This will serve as the
    ``val_step`` in the ``TorchTrainer`` class.

    Parameters
    ----------
    model: nn.Module
        The PyTorch model to be evaluated.
    val_dataloader: torch.utils.data.DataLoader
        Inference data loader.
    criterion: nn.Module
        The loss criterion used for training.
    device: str
        The device to use for training and evaluation.
    """
    model.eval()

    with torch.no_grad():
        _epoch_loss, _epoch_logits, _epoch_labels = [], [], []

        for batch in val_dataloader:
            # Get data.
            features, mask, labels = batch

            # Put on device.
            features = features.to(device)
            mask = mask.to(device)
            labels = labels.to(device)

            # Compute logits and loss.
            logits = model(features, mask)
            loss = criterion(logits, labels)

            # Stack logits & labels to compute epoch metrics.
            _epoch_loss.append(loss.detach().cpu().numpy())
            _epoch_logits.append(logits.detach())
            _epoch_labels.append(labels.detach())

        _epoch_loss = np.mean(_epoch_loss)
        _epoch_logits = torch.cat(_epoch_logits, dim=0).cpu().numpy()
        _epoch_labels = torch.cat(_epoch_labels, dim=0).cpu().numpy()

    return _epoch_loss, _epoch_logits, _epoch_labels


class BaseTrainer:
    """Base trainer class with ``train``, ``evaluate``, ``save`` and ``load``
    methods. ``train`` and ``evaluate`` methods should be overridden."""

    def __init__(self):
        pass

    def train(
        self,
        train_set: Subset,
        val_set: Subset,
    ) -> Tuple[Dict[str, float], Dict[str, float]]:
        """Training function."""
        raise NotImplementedError

    def evaluate(
        self,
        test_set: Subset,
    ) -> Dict[str, float]:
        """Inference function."""
        raise NotImplementedError

    def save(self, filepath: Union[Path, str]):
        """Model serialization."""
        filepath = Path(filepath).with_suffix(".pkl")
        with filepath.open("wb") as p:
            pickle.dump(self, p)

    @classmethod
    def load(cls, filepath: Union[Path, str]):
        """Model loading."""
        del cls
        filepath = Path(filepath).with_suffix(".pkl")
        with filepath.open("rb") as p:
            obj = pickle.load(p)
        return obj


class TorchTrainer(BaseTrainer):
    """Trainer class for training and evaluating PyTorch models.

    Parameters
    ----------
    model: nn.Module
        The PyTorch model to be trained.
    criterion: nn.Module
        The loss criterion used for training.
    metrics: Dict[str, Callable]
        Dictionary of metrics functions to evaluate the model's performance.
    batch_size: int = 16
        The batch size for training and evaluation.
    num_epochs: int = 10
        The number of training epochs.
    learning_rate: float = 1.0e-3
        The learning rate for the optimizer.
    weight_decay: float = 0.0
        The weight decay for the optimizer.
    device: str = "cpu"
        The device to use for training and evaluation.
    optimizer: Callable = Adam
        The optimizer class to use.
    train_step: Callable = slide_level_train_step
        The function for the training step.
    val_step: Callable = slide_level_val_step
        The function for the validation step.
    collator: Optional[Callable] = None
        The collator function for data preprocessing.
    """

    def __init__(
        self,
        model: nn.Module,
        criterion: nn.Module,
        metrics: Dict[str, Callable],
        batch_size: int = 16,
        num_epochs: int = 10,
        learning_rate: float = 1.0e-3,
        weight_decay: float = 0.0,
        device: str = "cpu",
        optimizer: Callable = Adam,
        train_step: Callable = slide_level_train_step,
        val_step: Callable = slide_level_val_step,
        collator: Optional[Callable] = None,
    ):
        super().__init__()
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.metrics = metrics

        self.train_step = train_step
        self.val_step = val_step

        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay

        self.collator = collator
        self.device = device

        self.train_losses: List[float]
        self.val_losses: List[float]
        self.train_metrics: Dict[str, List[float]]
        self.val_metrics: Dict[str, List[float]]

    def train(
        self,
        train_set: Subset,
        val_set: Subset,
    ) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:
        """Train the model using the provided training and validation datasets.

        Parameters
        ----------
        train_set: Subset
            The training dataset.
        val_set: Subset
            The validation dataset.

        Returns
        -------
        Tuple[Dict[str, List[float]], Dict[str, List[float]]]
            Two dictionaries containing the training and validation metrics for each epoch.
        """
        # Dataloaders.
        train_dataloader = DataLoader(
            dataset=train_set,
            shuffle=True,
            batch_size=self.batch_size,
            pin_memory=True,
            collate_fn=self.collator,
            drop_last=True,
        )
        val_dataloader = DataLoader(
            dataset=val_set,
            shuffle=False,
            batch_size=self.batch_size,
            pin_memory=True,
            collate_fn=self.collator,
            drop_last=False,
        )

        # Prepare modules.
        model = self.model.to(self.device)
        criterion = self.criterion.to(self.device)
        optimizer = self.optimizer(
            params=model.parameters(),
            lr=self.learning_rate,
            weight_decay=self.weight_decay,
        )

        # Training.
        train_losses, val_losses = [], []
        train_metrics: Dict[str, List[float]] = {
            k: [] for k in self.metrics.keys()
        }
        val_metrics: Dict[str, List[float]] = {
            k: [] for k in self.metrics.keys()
        }
        for ep in range(self.num_epochs):
            # Train step.
            (
                train_epoch_loss,
                train_epoch_logits,
                train_epoch_labels,
            ) = self.train_step(
                model=model,
                train_dataloader=train_dataloader,
                criterion=criterion,
                optimizer=optimizer,
                device=self.device,
            )

            # Inference step.
            val_epoch_loss, val_epoch_logits, val_epoch_labels = self.val_step(
                model=model,
                val_dataloader=val_dataloader,
                criterion=criterion,
                device=self.device,
            )

            # Compute metrics.
            for k, m in self.metrics.items():
                train_metric = m(train_epoch_labels, train_epoch_logits)
                val_metric = m(val_epoch_labels, val_epoch_logits)

                train_metrics[k].append(train_metric)
                val_metrics[k].append(val_metric)

                print(
                    f"Epoch {ep+1}: train_loss={train_epoch_loss:.5f}, train_{k}={train_metric:.4f}, "
                    f"val_loss={val_epoch_loss:.5f}, val_{k}={val_metric:.4f}"
                )

            train_losses.append(train_epoch_loss)
            val_losses.append(val_epoch_loss)

        self.train_losses = train_losses
        self.val_losses = val_losses
        self.train_metrics = train_metrics
        self.val_metrics = val_metrics

        return train_metrics, val_metrics

    def evaluate(
        self,
        test_set: Subset,
    ) -> Dict[str, float]:
        """Evaluate the model using the provided test dataset.

        Parameters
        ----------
        test_set: Subset
            The test dataset.

        Returns
        -------
        Dict[str, float]
            A dictionary containing the test metrics.
        """
        # Dataloader.
        test_dataloader = DataLoader(
            dataset=test_set,
            shuffle=False,
            batch_size=self.batch_size,
            pin_memory=True,
            collate_fn=self.collator,
            drop_last=False,
        )

        # Prepare modules.
        model = self.model.to(self.device)
        criterion = self.criterion.to(self.device)

        # Inference step.
        _, test_epoch_logits, test_epoch_labels = self.val_step(
            model=model,
            val_dataloader=test_dataloader,
            criterion=criterion,
            device=self.device,
        )

        # Compute metrics.
        test_metrics = {
            k: m(test_epoch_labels, test_epoch_logits)
            for k, m in self.metrics.items()
        }

        return test_metrics

    def predict(
        self,
        test_set: Subset,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Make predictions using the provided test dataset.

        Parameters
        ----------
        test_set: Subset
            The test dataset.

        Returns
        -------
        Tuple[np.ndarray, np.ndarray]
            A tuple containing the test labels and logits.
        """
        # Dataloader.
        test_dataloader = DataLoader(
            dataset=test_set,
            shuffle=False,
            batch_size=self.batch_size,
            pin_memory=True,
            collate_fn=self.collator,
            drop_last=False,
        )

        # Prepare modules.
        model = self.model.to(self.device)
        criterion = self.criterion.to(self.device)

        # Inference step.
        _, test_epoch_logits, test_epoch_labels = self.val_step(
            model=model,
            val_dataloader=test_dataloader,
            criterion=criterion,
            device=self.device,
        )

        return test_epoch_labels, test_epoch_logits

    def compute_metrics(
        self, labels: np.ndarray, logits: np.ndarray
    ) -> Dict[str, float]:
        """Compute metrics using the provided labels and logits.

        Parameters
        ----------
        labels: np.ndarray
            The ground truth labels.
        logits: np.ndarray
            The predicted logits.

        Returns
        -------
        Dict[str, float]
            A dictionary containing the computed metrics.
        """
        test_metrics = {
            k: metric(labels, logits) for k, metric in self.metrics.items()
        }
        return test_metrics
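A sketch of how the four files are meant to fit together (not part of the commit): the loss, metric, train/validation split and hyperparameters below are illustrative choices, and the dataset path is a placeholder.

# Hypothetical end-to-end wiring -- dataset path, split, loss and metric are assumptions.
import torch
from torch.utils.data import Subset

from data import SlideFeaturesDataset
from module import Chowder
from trainer import TorchTrainer
from utils import auc, pad_collate_fn

dataset = SlideFeaturesDataset("org/slide-features", split="train")   # placeholder path
n_train = int(0.8 * len(dataset))
train_set = Subset(dataset, range(n_train))
val_set = Subset(dataset, range(n_train, len(dataset)))

# 768 = tile feature dim minus the 3 leading (coordinate) columns dropped in Chowder.forward.
model = Chowder(in_features=768, out_features=1, n_top=5, n_bottom=5)

trainer = TorchTrainer(
    model=model,
    criterion=torch.nn.BCEWithLogitsLoss(),   # matches the (B, 1) float labels and logits
    metrics={"auc": auc},
    batch_size=16,
    num_epochs=10,
    collator=pad_collate_fn,                  # pads variable-length bags and builds the mask
    device="cuda" if torch.cuda.is_available() else "cpu",
)
train_metrics, val_metrics = trainer.train(train_set, val_set)
trainer.save("chowder_trainer")               # pickled to chowder_trainer.pkl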
utils.py
ADDED
@@ -0,0 +1,123 @@
# Copyright (c) Owkin, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import pickle
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import datasets
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from torch.utils.data.dataloader import default_collate


def pad_collate_fn(
    batch: List[Tuple[torch.Tensor, Any]],
    batch_first: bool = True,
    max_len: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.BoolTensor, Any]:
    """Pad together sequences of arbitrary lengths.

    Add a mask of the padding to the samples that can later be used
    to ignore padding in activation functions.

    Expected to be used in combination with a torch.utils.data.DataLoader.

    The sequences to be padded are expected to be the first element of each sample tuple.
    Other members will be batched using ``torch.utils.data.dataloader.default_collate``.

    Parameters
    ----------
    batch: List[Tuple[torch.Tensor, Any]]
        List of tuples (features, Any). Features have shape (N_slides_tiles, F)
        with ``N_slides_tiles`` being specific to each slide depending on the
        number of extractable tiles in the tissue matter. ``F`` is the feature
        extractor output dimension.
    batch_first: bool = True
        Either return (B, N_TILES, F) or (N_TILES, B, F).
    max_len: Optional[int] = None
        Pre-defined maximum length for elements inside a batch.

    Returns
    -------
    padded_sequences, masks, Any: Tuple[torch.Tensor, torch.BoolTensor, Any]
        - if batch_first: Tuple[(B, N_TILES, F), (B, N_TILES, 1), ...]
        - else: Tuple[(N_TILES, B, F), (N_TILES, B, 1), ...]

        with N_TILES = max_len if max_len is not None
        or N_TILES = max length of the training samples.
    """
    # Expect the sequences to be the first one in the sample tuples.
    sequences = []
    others = []
    for sample in batch:
        sequences.append(sample[0])
        others.append(sample[1:])

    if max_len is None:
        max_len = max([s.size(0) for s in sequences])

    trailing_dims = sequences[0].size()[1:]

    if batch_first:
        padded_dims = (len(sequences), max_len) + trailing_dims
        masks_dims = (len(sequences), max_len, 1)
    else:
        padded_dims = (max_len, len(sequences)) + trailing_dims
        masks_dims = (max_len, len(sequences), 1)

    padded_sequences = sequences[0].data.new(*padded_dims).fill_(0.0)
    masks = torch.ones(*masks_dims, dtype=torch.bool)

    for i, tensor in enumerate(sequences):
        length = tensor.size(0)
        # Use index notation to prevent duplicate references to the tensor.
        if batch_first:
            padded_sequences[i, :length, ...] = tensor[:max_len, ...]
            masks[i, :length, ...] = False
        else:
            padded_sequences[:length, i, ...] = tensor[:max_len, ...]
            masks[:length, i, ...] = False

    # Batch the other members of the tuple using default_collate.
    others = default_collate(others)

    return (padded_sequences, masks, *others)


def auc(labels: np.ndarray, logits: np.ndarray) -> float:
    """ROC AUC score for binary classification.

    Parameters
    ----------
    labels: np.ndarray
        Labels of the outcome.
    logits: np.ndarray
        Logits (raw model outputs); a sigmoid is applied before computing the score.
    """
    preds = 1.0 / (1.0 + np.exp(-logits))
    return roc_auc_score(labels, preds)


def get_cv_metrics(
    cv_metrics: List[Dict[str, List[float]]], epoch: int = -1
) -> Dict[str, str]:
    """Get mean and std from cross-validation metrics at a given epoch."""
    cv_mean_metrics = {}
    metrics_names = cv_metrics[0].keys()
    for m_name in metrics_names:
        values = [fold_metrics[m_name][epoch] for fold_metrics in cv_metrics]
        mean_metric, std_metric = np.mean(values), np.std(values)
        cv_mean_metrics[m_name] = f"{mean_metric:.4f} ± {std_metric:.4f}"
    return cv_mean_metrics
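A tiny, self-contained demo of what pad_collate_fn returns for bags of different lengths (not part of the commit); the bag sizes and feature dimension are arbitrary.

# Hypothetical demo -- bag sizes and feature dimension are arbitrary.
import torch
from utils import pad_collate_fn

batch = [
    (torch.randn(4, 8), torch.tensor([1.0])),   # slide with 4 tiles, 8-d features, label 1
    (torch.randn(7, 8), torch.tensor([0.0])),   # slide with 7 tiles
]
features, mask, labels = pad_collate_fn(batch)
print(features.shape)    # torch.Size([2, 7, 8]) -- padded to the longest bag
print(mask.shape)        # torch.Size([2, 7, 1]) -- True where tiles are padding
print(mask[0, :, 0])     # tensor([False, False, False, False,  True,  True,  True])
print(labels.shape)      # torch.Size([2, 1])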