import math
import torch
import torch.nn.functional as nn_f
from typing import List, Optional, Tuple
from utils import img
from utils import view
from utils import misc


class Foveation(object):
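    """
    Blend a stack of layers rendered at different fields of view and
    resolutions into a single foveated image: fovea layers are composited
    around a given fovea center and smoothly faded into the outermost
    (periphery) layer.
    """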

    def __init__(self, layers_fov: List[float], layers_res: List[Tuple[int, int]],
                 out_res: Tuple[int, int], *, blend=0.6, device=None):
        self.layers_fov = layers_fov
        self.layers_res = layers_res
        self.out_res = out_res
        self.blend = blend
        self.device = device
        self.n_layers = len(self.layers_fov)
        self.eye_fovea_blend = [
            self._gen_layer_blendmap(i)
            for i in range(self.n_layers - 1)
        ]  # blend maps of fovea layers
        self.coords = misc.meshgrid(*out_res).to(device=device)  # (H:out, W:out, 2) pixel coordinates

    def to(self, device):
        self.eye_fovea_blend = [x.to(device=device)
                                for x in self.eye_fovea_blend]
        self.coords = self.coords.to(device=device)
        return self

    def synthesis(self, layers: List[torch.Tensor],
                  fovea_center: Tuple[float, float],
                  shifts: Optional[List[int]] = None) -> torch.Tensor:
        """
        Generate foveated retinal image by blending fovea layers
        **Note: current implementation only supports two fovea layers**

        :param layers `List(Tensor(B, C, H'{l}, W'{l}))`: list of foveated layers, ordered from innermost to outermost
        :param fovea_center `(float, float)`: center of the fovea, in pixels relative to the image center
        :param shifts `List(int)`: optional per-layer horizontal shifts, in pixels
        :return `Tensor(B, C, H:out, W:out)`: foveated images
        """
        output: torch.Tensor = nn_f.interpolate(layers[-1], self.out_res,
                                                mode='bilinear', align_corners=False)
        if shifts is not None:
            output = img.horizontal_shift(output, shifts[-1])
        # fovea center in absolute image coordinates (fovea_center is given
        # relative to the image center)
        c = torch.tensor([
            fovea_center[0] + self.out_res[1] / 2,
            fovea_center[1] + self.out_res[0] / 2
        ], device=self.coords.device)
        for i in range(self.n_layers - 2, -1, -1):
            if layers[i] is None:
                continue
            R = self.get_layer_size_in_final_image(i) / 2
            grid = ((self.coords - c) / R)[None, ...]  # normalize to [-1, 1] over this layer's span for grid_sample
            if shifts is not None:
                grid = img.horizontal_shift(grid, shifts[i], -2)
            # (1, 1, H:out, W:out)
            blend = nn_f.grid_sample(self.eye_fovea_blend[i][None, None, ...], grid, align_corners=False)
            # composite: output = (1 - blend) * output + blend * layer_i
            output.mul_(1 - blend).add_(nn_f.grid_sample(layers[i], grid, align_corners=False) * blend)
        return output

    def get_layer_size_in_final_image(self, i: int) -> int:
        """
        Get size of layer i in final image

        :param i: index of layer
        :return: size of layer i in final image (in pixels)
        """
        return self.get_source_layer_cover_size_in_target_layer(
            self.layers_fov[i], self.layers_fov[-1], self.out_res[0])

    def get_source_layer_cover_size_in_target_layer(self, source_fov, target_fov,
                                                    target_pixel_height) -> int:
        """
        Get size of layer i in final image

        :param i: index of layer
        :return: size of layer i in final image (in pixels)
        """
        source_physical_height = view.fov2length(source_fov)
        target_physical_height = view.fov2length(target_fov)
        return int(math.ceil(target_pixel_height * source_physical_height / target_physical_height))
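    # Worked example for the method above, assuming view.fov2length maps a
    # field of view to the physical length it subtends (e.g. something like
    # 2 * tan(fov / 2); the actual definition lives in utils.view): if
    # fov2length(source_fov) == 1.0, fov2length(target_fov) == 4.0 and
    # target_pixel_height == 1600, the source layer covers
    # ceil(1600 * 1.0 / 4.0) = 400 pixels of the target layer.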

    def _gen_layer_blendmap(self, i: int) -> torch.Tensor:
        """
        Generate blend map for fovea layer i

        :param i: index of fovea layer
        :return `Tensor(H{i}, W{i})`: blend map
        """
        size = self.get_layer_size_in_final_image(i)
        R = size / 2
        p = misc.meshgrid(size, size).to(device=self.device)  # (size, size, 2)
        r = torch.norm(p - R, dim=2)  # (size, size): distance of each pixel to the layer center
        return misc.smooth_step(R, R * self.blend, r)
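    # For reference, a GLSL-style smooth step consistent with the call above
    # (an assumption: the actual definition lives in utils.misc and may differ):
    #
    #   def smooth_step(x0, x1, x):
    #       t = ((x - x0) / (x1 - x0)).clamp(0, 1)
    #       return t * t * (3 - 2 * t)
    #
    # With x0 = R and x1 = R * blend, the map is 1 for r <= R * blend and
    # falls smoothly to 0 at r >= R.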

    def get_layers_mask(self) -> List[torch.Tensor]:
        """
        Generate mask images for layers[:-1].
        The meaning of values in mask images:
        -1: skipped
        0~1: blend with inner layer
        1~2: only self layer
        2~3: blend with outer layer

        :return: Mask images for layers except outermost
        """
        layers_mask = []
        for i in range(self.n_layers - 1):
            layers_mask.append(torch.ones(*self.layers_res[i], device=self.device) * -1)
            r = torch.norm(misc.meshgrid(*self.layers_res[i], normalize=True).to(device=self.device) * 2 - 1, dim=-1)
            inner_radius = self.get_source_layer_cover_size_in_target_layer(
                self.layers_fov[i - 1], self.layers_fov[i],
                self.layers_res[i][0]) / self.layers_res[i][0] if i > 0 else 0
            bounds = [inner_radius * (1 - self.blend), inner_radius, self.blend, 1]
            # the three intervals between consecutive bounds map to mask values
            # 0~1 (blend with inner layer), 1~2 (only this layer) and
            # 2~3 (blend with outer layer); pixels outside all intervals keep -1
            for bi in range(len(bounds) - 1):
                region = torch.logical_and(r > bounds[bi], r <= bounds[bi + 1])
                layers_mask[i][region] = bi + \
                    (r[region] - bounds[bi]) / (bounds[bi + 1] - bounds[bi])
        return layers_mask
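

# A minimal usage sketch (not part of the original module). It assumes two
# fovea layers plus a periphery layer and that utils.misc / utils.view /
# utils.img behave as they are used above; the FOVs, resolutions and fovea
# center below are made-up values.
if __name__ == "__main__":
    fov = Foveation(layers_fov=[20.0, 45.0, 110.0],
                    layers_res=[(128, 128), (256, 256), (230, 256)],
                    out_res=(1440, 1600), device=torch.device("cpu"))
    layers = [torch.rand(1, 3, *res) for res in fov.layers_res]
    image = fov.synthesis(layers, fovea_center=(0.0, 0.0))  # fovea at image center
    print(image.shape)  # expected: torch.Size([1, 3, 1440, 1600])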