"""UNet building blocks, relative-Lp error metrics and a PyTorch Lightning
wrapper used for the pretraining stage."""

import os

import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import wandb
from scipy.io import loadmat
from torch import optim

from models.basics_model import get_grid2D, FC_nn


class LpLoss(object):
    """Relative (and absolute) Lp loss between batched tensors.

    Each sample is flattened and the Lp norm is taken over it; `rel`
    normalises by the norm of the target, `abs` scales by the grid spacing.
    """

    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(LpLoss, self).__init__()
        assert d > 0 and p > 0
        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def abs(self, x, y):
        num_examples = x.size()[0]
        # Assume a uniform mesh with spacing h on [0, 1].
        h = 1.0 / (x.size()[1] - 1.0)
        all_norms = (h ** (self.d / self.p)) * torch.norm(
            x.view(num_examples, -1) - y.view(num_examples, -1), self.p, 1)
        if self.reduction:
            if self.size_average:
                return torch.mean(all_norms)
            else:
                return torch.sum(all_norms)
        return all_norms

    def rel(self, x, y):
        num_examples = x.size()[0]

        diff_norms = torch.norm(
            x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1)
        y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)

        if self.reduction:
            if self.size_average:
                return torch.mean(diff_norms / y_norms)
            else:
                return torch.sum(diff_norms / y_norms)

        return diff_norms / y_norms

    def __call__(self, x, y):
        return self.rel(x, y)


class RRMSE(object):
    """Relative root-mean-square error, averaged over the batch."""

    def __init__(self):
        super(RRMSE, self).__init__()

    def __call__(self, x, y):
        num_examples = x.size()[0]
        norm = torch.norm(x.view(num_examples, -1) - y.view(num_examples, -1), 2, 1) ** 2
        normy = torch.norm(y.view(num_examples, -1), 2, 1) ** 2
        mean_norm = torch.mean((norm / normy) ** (1 / 2))
        return mean_norm


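def _demo_losses():
    # Hypothetical illustration (not part of the original file): both metrics
    # normalise by the norm of the target, so they are scale-invariant, and
    # for p=2 LpLoss.rel and RRMSE compute the same quantity.
    x = torch.randn(4, 64, 64)
    y = torch.randn(4, 64, 64)
    rel_l2 = LpLoss()(x, y)   # mean over the batch of ||x - y||_2 / ||y||_2
    rrmse = RRMSE()(x, y)     # same value, computed from squared norms
    print(rel_l2.item(), rrmse.item())

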
def get_unet_model(in_ch=1, out_ch=1, scales=5, skip=4,
                   channels=(32, 32, 64, 64, 128, 128), use_sigmoid=True,
                   use_norm=True):
    """Convenience constructor: a UNet_module with `scales` resolution levels
    and the same number of skip channels at every level."""
    skip_channels = [skip] * scales
    return UNet_module(in_ch=in_ch, out_ch=out_ch, channels=channels[:scales],
                       skip_channels=skip_channels, use_sigmoid=use_sigmoid,
                       use_norm=use_norm)


class DownBlock(nn.Module):
    """Downsampling block: a stride-2 convolution followed by a stride-1
    convolution, each with LeakyReLU and optional group normalization."""

    def __init__(self, in_ch, out_ch, kernel_size=3, num_groups=4, use_norm=True):
        super(DownBlock, self).__init__()
        to_pad = int((kernel_size - 1) / 2)
        if use_norm:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size,
                          stride=2, padding=to_pad),
                nn.GroupNorm(num_channels=out_ch, num_groups=num_groups),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size,
                          stride=1, padding=to_pad),
                nn.GroupNorm(num_channels=out_ch, num_groups=num_groups),
                nn.LeakyReLU(0.2, inplace=True))
        else:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size,
                          stride=2, padding=to_pad),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size,
                          stride=1, padding=to_pad),
                nn.LeakyReLU(0.2, inplace=True))

    def forward(self, x):
        x = self.conv(x)
        return x


class InBlock(nn.Module):
    def __init__(self, in_ch, out_ch, kernel_size=3, num_groups=2, use_norm=True):
        super(InBlock, self).__init__()
        to_pad = int((kernel_size - 1) / 2)
        if use_norm:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size,
                          stride=1, padding=to_pad),
                nn.GroupNorm(num_channels=out_ch, num_groups=num_groups),
                nn.LeakyReLU(0.2, inplace=True))
        else:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size,
                          stride=1, padding=to_pad),
                nn.LeakyReLU(0.2, inplace=True))

    def forward(self, x):
        x = self.conv(x)
        return x


class UpBlock(nn.Module):
    """Upsampling block: bilinear upsampling of the coarse input, a 1x1 skip
    convolution on the encoder feature map, concatenation, then two
    convolutions. With skip_ch == 0 the skip connection is zeroed out."""

    def __init__(self, in_ch, out_ch, skip_ch=4, kernel_size=3, num_groups=2, use_norm=True):
        super(UpBlock, self).__init__()
        to_pad = int((kernel_size - 1) / 2)
        self.skip = skip_ch > 0
        if skip_ch == 0:
            skip_ch = 1
        if use_norm:
            self.conv = nn.Sequential(
                nn.GroupNorm(num_channels=in_ch + skip_ch, num_groups=1),
                nn.Conv2d(in_ch + skip_ch, out_ch, kernel_size, stride=1,
                          padding=to_pad),
                nn.GroupNorm(num_channels=out_ch, num_groups=num_groups),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size,
                          stride=1, padding=to_pad),
                nn.GroupNorm(num_channels=out_ch, num_groups=num_groups),
                nn.LeakyReLU(0.2, inplace=True))
        else:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch + skip_ch, out_ch, kernel_size, stride=1,
                          padding=to_pad),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size,
                          stride=1, padding=to_pad),
                nn.LeakyReLU(0.2, inplace=True))

        if use_norm:
            self.skip_conv = nn.Sequential(
                nn.Conv2d(out_ch, skip_ch, kernel_size=1, stride=1),
                nn.GroupNorm(num_channels=skip_ch, num_groups=1),
                nn.LeakyReLU(0.2, inplace=True))
        else:
            self.skip_conv = nn.Sequential(
                nn.Conv2d(out_ch, skip_ch, kernel_size=1, stride=1),
                nn.LeakyReLU(0.2, inplace=True))

        self.up = nn.Upsample(scale_factor=2, mode='bilinear',
                              align_corners=True)
        self.concat = Concat()

    def forward(self, x1, x2):
        x1 = self.up(x1)
        x2 = self.skip_conv(x2)
        if not self.skip:
            x2 = x2 * 0
        x = self.concat(x1, x2)
        x = self.conv(x)
        return x


class Concat(nn.Module):
    """Concatenate feature maps along the channel dimension, center-cropping
    them to the smallest spatial size if their resolutions differ."""

    def __init__(self):
        super(Concat, self).__init__()

    def forward(self, *inputs):
        inputs_shapes2 = [x.shape[2] for x in inputs]
        inputs_shapes3 = [x.shape[3] for x in inputs]

        if (np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and
                np.all(np.array(inputs_shapes3) == min(inputs_shapes3))):
            inputs_ = inputs
        else:
            target_shape2 = min(inputs_shapes2)
            target_shape3 = min(inputs_shapes3)

            inputs_ = []
            for inp in inputs:
                diff2 = (inp.size(2) - target_shape2) // 2
                diff3 = (inp.size(3) - target_shape3) // 2
                inputs_.append(inp[:, :, diff2: diff2 + target_shape2,
                                   diff3: diff3 + target_shape3])
        return torch.cat(inputs_, dim=1)


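def _demo_concat():
    # Hypothetical illustration (not part of the original file): Concat
    # center-crops mismatched spatial sizes before stacking the channels.
    a = torch.randn(1, 3, 17, 17)
    b = torch.randn(1, 2, 16, 16)
    print(Concat()(a, b).shape)  # expected: torch.Size([1, 5, 16, 16])

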
class OutBlock(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(OutBlock, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1)

    def forward(self, x):
        x = self.conv(x)
        return x

    def __len__(self):
        return len(self._modules)


class UNet_module(nn.Module):
    """Encoder-decoder UNet built from InBlock, DownBlock, UpBlock and
    OutBlock; `channels` and `skip_channels` hold one entry per scale."""

    def __init__(self, in_ch, out_ch, channels, skip_channels,
                 use_sigmoid=True, use_norm=True):
        super(UNet_module, self).__init__()

        assert (len(channels) == len(skip_channels))
        self.scales = len(channels)
        self.use_sigmoid = use_sigmoid
        self.down = nn.ModuleList()
        self.up = nn.ModuleList()
        self.inc = InBlock(in_ch, channels[0], use_norm=use_norm)
        for i in range(1, self.scales):
            self.down.append(DownBlock(in_ch=channels[i - 1],
                                       out_ch=channels[i],
                                       use_norm=use_norm))
        for i in range(1, self.scales):
            self.up.append(UpBlock(in_ch=channels[-i],
                                   out_ch=channels[-i - 1],
                                   skip_ch=skip_channels[-i],
                                   use_norm=use_norm))
        self.outc = OutBlock(in_ch=channels[0],
                             out_ch=out_ch)

    def forward(self, x0):
        xs = [self.inc(x0), ]
        # Encoder: keep every intermediate feature map for the skip connections.
        for i in range(self.scales - 1):
            xs.append(self.down[i](xs[-1]))
        x = xs[-1]
        # Decoder: upsample and fuse with the matching encoder feature map.
        for i in range(self.scales - 1):
            x = self.up[i](x, xs[-2 - i])

        return torch.sigmoid(self.outc(x)) if self.use_sigmoid else self.outc(x)


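def _demo_unet_module():
    # Hypothetical smoke test (not part of the original file): a 3-scale
    # UNet_module on a random 1-channel input. The output keeps the input's
    # spatial size and carries the requested number of output channels.
    net = UNet_module(in_ch=1, out_ch=2, channels=[16, 32, 64],
                      skip_channels=[4, 4, 4], use_sigmoid=False, use_norm=True)
    x = torch.randn(2, 1, 64, 64)
    with torch.no_grad():
        y = net(x)
    print(y.shape)  # expected: torch.Size([2, 2, 64, 64])

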
class UNet_pretrain(pl.LightningModule):
    """PyTorch Lightning wrapper that trains a UNet_module to map a
    sound-speed (sos) input, together with coordinate-grid and source/theta
    features, to the target field. Tensors are channel-last, (B, H, W, C)."""

    def __init__(self,
                 in_ch=1,
                 out_ch=2,
                 scales=16,
                 skip=4,
                 source_type='theta',
                 channels=[60, 60, 60, 60, 120, 120, 120, 120, 240, 240, 240, 240, 480, 480, 480, 480],
                 use_sigmoid=False,
                 use_norm=True,
                 learning_rate=0.001,
                 step_size=5,
                 gamma=0.5,
                 weight_decay=0.00001,
                 eta_min=5e-4,
                 loss='rel_l2',
                 F_feature=False,
                 add_term=False,
                 val_exp=False,
                 src_path_breast='',
                 gt_path_breast='',
                 src_path_arm='',
                 gt_path_arm='',
                 src_path_limb='',
                 gt_path_limb=''
                 ):
        super().__init__()
        self.in_ch = in_ch
        self.with_grid = True
        if self.with_grid:
            # Two extra channels for the (x, y) coordinate grid.
            self.in_ch += 2
        self.source_type = source_type
        if self.source_type == 'source':
            self.in_ch += 2
        elif self.source_type == 'theta':
            self.in_ch += 2
        self.out_ch = out_ch
        self.channels = channels
        self.skip = skip
        self.use_sigmoid = use_sigmoid
        self.use_norm = use_norm
        self.scales = scales
        self.unet = self.build_unet()
        self.save_hyperparameters()
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        self.F_feature = F_feature
        self.add_term = add_term
        self.eta_min = eta_min
        if loss == 'l1':
            self.criterion = nn.L1Loss()
            self.criterion_val = RRMSE()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
            self.criterion_val = RRMSE()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
            self.criterion_val = RRMSE()
        elif loss == "rel_l2":
            self.criterion = LpLoss()
            self.criterion_val = RRMSE()
        self.val_iter = 0

    def forward(self, sos, src):
        if self.source_type == 'theta':
            src_input = src[:, :, :, :]
        elif self.source_type == 'source':
            src_input = src[:, :, :, 0:2]
        x = sos
        field = src[..., :2].clone()
        if self.with_grid:
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid, src_input), dim=-1)

        # The UNet works channel-first, so permute (B, H, W, C) -> (B, C, H, W)
        # and back after the forward pass.
        x = self.unet(x.permute(0, 3, 1, 2)).contiguous()
        x = x.permute(0, 2, 3, 1).contiguous()

        if self.add_term:
            # Predict a multiplicative correction of the incident field:
            # out = field * (1 + prediction), evaluated in complex arithmetic.
            x = torch.view_as_real(torch.view_as_complex(field) * (1 + torch.view_as_complex(x)))
        return x

    def get_grid(self, shape, device):
        batchsize, size_x, size_y = shape[0], shape[1], shape[2]
        gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
        gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
        gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
        gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
        feature = []
        gridxy = torch.cat((gridx, gridy), dim=-1).to(device)
        feature.append(gridxy)
        if self.F_feature:
            # Optional extra sinusoidal positional features.
            for i in range(1, 3):
                feature.append(torch.sin(10 ** (-i) * gridxy))
        return torch.cat(feature, dim=-1).to(device)

    def build_unet(self):
        """Build the UNet (with optional group normalization) from the stored
        hyperparameters.

        Returns
        -------
        torch.nn.Module
            The UNet model.
        """
        return get_unet_model(in_ch=self.in_ch, out_ch=self.out_ch, scales=self.scales, skip=self.skip,
                              channels=self.channels, use_sigmoid=self.use_sigmoid,
                              use_norm=self.use_norm)

    def training_step(self, batch: torch.Tensor, batch_idx):
        sos, src, y, index = batch
        batch_size = sos.shape[0]
        out = self(sos, src)
        loss = self.criterion(out.view(batch_size, -1), y.view(batch_size, -1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        wandb.log({"loss": loss.item()})
        return loss

    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        sos, src, y, index = val_batch
        split_index = (index[-1] + index[0]) // 2
        batch_size = sos.shape[0]
        out = self(sos, src)
        val_loss = self.criterion_val(out.view(batch_size, -1), y.view(batch_size, -1))
        self.log("val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True)

        return val_loss

    def configure_optimizers(self, optimizer=None, scheduler=None):
        if optimizer is None:
            optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
        if scheduler is None:
            scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler
            },
        }


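if __name__ == "__main__":
    # A minimal, hypothetical smoke test (not part of the original training
    # code). It assumes models.basics_model.get_grid2D(shape, device) returns
    # a (B, H, W, 2) coordinate grid, as the forward pass above expects, and
    # uses illustrative sizes only.
    model = UNet_pretrain(in_ch=1, out_ch=2, scales=4, skip=4,
                          channels=[32, 32, 64, 64], source_type='theta',
                          use_sigmoid=False)
    sos = torch.randn(2, 64, 64, 1)   # sound-speed input, channel-last
    src = torch.randn(2, 64, 64, 2)   # source / theta encoding, channel-last
    with torch.no_grad():
        out = model(sos, src)
    print(out.shape)  # expected: torch.Size([2, 64, 64, 2])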