from typing import List, Mapping, Optional, Tuple, Union
import torch
import math
import glm
from . import misc


def fov2length(angle):
    return math.tan(math.radians(angle) / 2) * 2
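
# A worked instance: fov2length(90) == 2.0, since tan(45 deg) * 2 == 2; a
# 90-degree field of view spans an image plane of length 2 at unit distance.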


class CameraParam(object):

    def __init__(self, params: Mapping[str, Union[float, bool]],
                 res: Tuple[int, int], *, device=None) -> None:
        """
        :param params: camera parameters as specified in the config file
        :param res `(H, W)`: resolution of the view
        :param device: device to store the intrinsic tensors on
        """
        super().__init__()
        params = CameraParam.convert_camera_params(params, res)
        self.res = res
        self.f = torch.tensor([params['fx'], params['fy'], 1], device=device)
        self.c = torch.tensor([params['cx'], params['cy']], device=device)
        self.device = device

    def to(self, device: torch.device):
        self.f = self.f.to(device)
        self.c = self.c.to(device)
        self.device = device
        return self

    def resize(self, res: Tuple[int, int]):
        """
        Scale the camera intrinsics to a new resolution

        :param res `(H, W)`: the new resolution
        """
        self.f[0] = self.f[0] / self.res[1] * res[1]
        self.f[1] = self.f[1] / self.res[0] * res[0]
        self.c[0] = self.c[0] / self.res[1] * res[1]
        self.c[1] = self.c[1] / self.res[0] * res[0]
        self.res = res

    def proj(self, p: torch.Tensor, normalize=False, center_as_origin=False) -> torch.Tensor:
        """
        Project positions in local space to image plane

        :param p `Tensor(..., 3)`: positions in local space
        :param normalize: use normalized coordinates for the image plane
        :param center_as_origin: take the center of the image plane as the origin instead of the top-left corner
        :return `Tensor(..., 2)`: positions in image plane
        """
        p = p * self.f
        p = p[..., 0:2] / p[..., 2:3]
        if not center_as_origin:
            p = p + self.c
        if normalize:
            p = p / torch.tensor([self.res[1], self.res[0]], device=self.device)
        return p

    def unproj(self, p: torch.Tensor, z: Optional[torch.Tensor] = None, normalize=False, center_as_origin=False) -> torch.Tensor:
        """
        Unproject positions in image plane to local space

        :param p `Tensor(..., 2)`: positions in image plane
        :param z `Tensor(..., 1)`: depths of the positions; None means all depths are set to 1
        :param normalize: use normalized coordinates for the image plane
        :param center_as_origin: take the center of the image plane as the origin instead of the top-left corner
        :return `Tensor(..., 3)`: positions in local space
        """
        if normalize:
            p = p * torch.tensor([self.res[1], self.res[0]], device=self.device)
        if not center_as_origin:
            p = p - self.c
        p = misc.broadcast_cat(p / self.f[0:2], 1.0)
        if z is not None:
            p = p * z
        return p
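
    # A minimal round-trip sketch (camera and values are hypothetical):
    # projecting a local point and unprojecting at the same depth recovers it.
    #   cam = CameraParam({'fov': 60.0, 'cx': 0.5, 'cy': 0.5, 'normalized': True},
    #                     (480, 640))
    #   p = torch.tensor([[0.1, -0.2, 2.0]])
    #   uv = cam.proj(p)                  # Tensor(1, 2): pixel coordinates
    #   p2 = cam.unproj(uv, p[..., 2:3])  # Tensor(1, 3): equals p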

    def get_local_rays(self, flatten=False, norm=True) -> torch.Tensor:
        """
        Get view rays in local space

        :param flatten: whether to flatten the returned tensor
        :param norm: whether to normalize the rays to unit length
        :return `Tensor(H, W, 3)|Tensor(HW, 3)`: view rays; the shape depends on 'flatten'
        """
        coords = misc.meshgrid(*self.res).to(self.f.device)
        rays = self.unproj(coords)
        if norm:
            rays = rays / rays.norm(dim=-1, keepdim=True)
        if flatten:
            rays = rays.flatten(0, 1)
        return rays
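
    # Sketch (with the hypothetical camera above): one ray per pixel.
    #   rays = cam.get_local_rays()              # Tensor(480, 640, 3)
    #   rays = cam.get_local_rays(flatten=True)  # Tensor(480 * 640, 3)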

    def get_global_rays(self, trans: "Trans", flatten=False, norm=True) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        [summary]

Nianchen Deng's avatar
sync    
Nianchen Deng committed
93
94
        :param t `Tensor(N.., 3)`: translation vectors
        :param r `Tensor(N.., 3, 3)`: rotation matrices
        :param flatten: whether to flatten the returned tensors, defaults to False
        :param norm: whether to normalize the rays to unit length, defaults to True
        :return: origins and directions of the rays in global space
        """
        rays = self.get_local_rays(flatten, norm)  # (M.., 3)
        rays_o, _ = torch.broadcast_tensors(trans.t[..., None, :], rays) if flatten \
            else torch.broadcast_tensors(trans.t[..., None, None, :], rays)  # (N.., M.., 3)
        rays_d = trans.trans_vector(rays)
        return rays_o, rays_d
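
    # Sketch (hypothetical identity pose): wrap a pose in a Trans (defined
    # below) to cast the per-pixel rays into global space.
    #   pose = Trans(torch.zeros(1, 3), torch.eye(3).expand(1, 3, 3))
    #   rays_o, rays_d = cam.get_global_rays(pose, flatten=True)
    #   # rays_o, rays_d: Tensor(1, 480 * 640, 3)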

    @staticmethod
    def convert_camera_params(input_camera_params: Mapping[str, Union[float, bool]],
                              view_res: Tuple[int, int]) -> Mapping[str, Union[float, bool]]:
        """
        Check and convert camera parameters in config file to pixel-space

        :param cam_params: { ["fx", "fy" | "fov"], "cx", "cy", ["normalized"] },
            the parameters of camera
        :return: camera parameters
        """
        input_is_normalized = bool(input_camera_params.get('normalized'))
        camera_params = {}
        if 'fov' in input_camera_params:
            if input_is_normalized:
                camera_params['fy'] = 1 / fov2length(input_camera_params['fov'])
                camera_params['fx'] = camera_params['fy'] / view_res[1] * view_res[0]
            else:
                camera_params['fx'] = camera_params['fy'] = view_res[0] / \
                    fov2length(input_camera_params['fov'])
            camera_params['fy'] *= -1  # Negate fy (flips the v axis between image space and camera space)
        else:
            camera_params['fx'] = input_camera_params['fx']
            camera_params['fy'] = input_camera_params['fy']
        camera_params['cx'] = input_camera_params['cx']
        camera_params['cy'] = input_camera_params['cy']
        if input_is_normalized:
            camera_params['fx'] *= view_res[1]
            camera_params['fy'] *= view_res[0]
            camera_params['cx'] *= view_res[1]
            camera_params['cy'] *= view_res[0]
        return camera_params
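
    # Sketch: a normalized 'fov' spec for a (480, 640) view converts to pixel
    # intrinsics with a centered principal point; fy comes out negative (see
    # the note in the code above).
    #   CameraParam.convert_camera_params(
    #       {'fov': 60.0, 'cx': 0.5, 'cy': 0.5, 'normalized': True}, (480, 640))
    #   # -> {'fx': ~415.7, 'fy': ~-415.7, 'cx': 320.0, 'cy': 240.0}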


class Trans(object):

    def __init__(self, t: torch.Tensor, r: torch.Tensor):
        """
        :param t `Tensor(N.., 3)`: translation vectors; a single vector is promoted to a batch of one
        :param r `Tensor(N.., 3, 3)`: rotation matrices
        """
        self.t = t
        self.r = r
        if len(self.t.size()) == 1:
            self.t = self.t[None, :]
            self.r = self.r[None, :, :]

    def trans_point(self, p: torch.Tensor, inverse=False) -> torch.Tensor:
        """
        Transform points by the stored translation vectors and rotation matrices

        :param p `Tensor(N.., 3)`: points to transform
        :param inverse: whether to perform the inverse transform
        :return `Tensor(M.., N.., 3)`: transformed points
        """
        size_N = list(p.size())[:-1]
        size_M = list(self.r.size())[:-2]
        out_size = size_M + size_N + [3]
        t_size = size_M + [1 for _ in range(len(size_N))] + [3]
        t = self.t.view(t_size)  # (M.., 1.., 3)
        if inverse:
            p = (p - t).view(size_M + [-1, 3])
            r = self.r
        else:
            p = p.view(-1, 3)
            r = self.r.movedim(-1, -2)  # Transpose rotation matrices
        out = torch.matmul(p, r).view(out_size)
        if not inverse:
            out = out + t
        return out

    def trans_vector(self, v: torch.Tensor, inverse=False) -> torch.Tensor:
        """
        Transform vectors by the stored rotation matrices

        :param v `Tensor(N.., 3)`: vectors to transform
        :param inverse: whether to perform the inverse transform
        :return `Tensor(M.., N.., 3)`: transformed vectors
        """
        out_size = list(self.r.size())[:-2] + list(v.size())[:-1] + [3]
        r = self.r if inverse else self.r.movedim(-1, -2)  # Transpose rotation matrices
        out = torch.matmul(v.view(-1, 3), r).view(out_size)
        return out

    def size(self) -> List[int]:
        return list(self.t.size()[:-1])

    def get(self, *index):
        return Trans(self.t[index], self.r[index])
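
    # Sketch (hypothetical values): a batch of two poses applied to one point.
    #   trans = Trans(torch.tensor([[0., 0., 0.], [1., 0., 0.]]),
    #                 torch.eye(3).expand(2, 3, 3))
    #   trans.trans_point(torch.tensor([[0., 0., 1.]]))
    #   # -> Tensor(2, 1, 3): the point offset by each translation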


def trans_point(p: torch.Tensor, t: torch.Tensor, r: torch.Tensor, inverse=False) -> torch.Tensor:
    """
    Transform points by given translation vectors and rotation matrices

    :param p `Tensor(N.., 3)`: points to transform
    :param t `Tensor(M.., 3)`: translation vectors
    :param r `Tensor(M.., 3, 3)`: rotation matrices
    :param inverse: whether to perform the inverse transform
    :return `Tensor(M.., N.., 3)`: transformed points
    """
    size_N = list(p.size())[0:-1]
    size_M = list(r.size())[0:-2]
    out_size = size_M + size_N + [3]
    t_size = size_M + [1 for _ in range(len(size_N))] + [3]
    t = t.view(t_size)
    if inverse:
        p = (p - t).view(size_M + [-1, 3])
    else:
        p = p.view(-1, 3)  # matmul broadcasts the flat points over the M.. batch
        r = r.movedim(-1, -2)  # Transpose rotation matrices
    out = torch.matmul(p, r).view(out_size)
    if not inverse:
        out = out + t
    return out


def trans_vector(v: torch.Tensor, r: torch.Tensor, inverse=False) -> torch.Tensor:
    """
    Transform vectors by given rotation matrices

    :param v `Tensor(N.., 3)`: vectors to transform
    :param r `Tensor(M.., 3, 3)`: rotation matrices
    :param inverse: whether to perform the inverse transform
    :return `Tensor(M.., N.., 3)`: transformed vectors
    """
    out_size = list(r.size())[0:-2] + list(v.size())[0:-1] + [3]
    if not inverse:
        r = r.movedim(-1, -2)  # Transpose rotation matrices
    out = torch.matmul(v.flatten(0, -2), r).view(out_size)
    return out
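

# Sketch (hypothetical values): a 90-degree yaw about z applied to a unit x
# vector through the functional API.
#   r = torch.tensor([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
#   trans_vector(torch.tensor([[1., 0., 0.]]), r)  # ~[[0., 1., 0.]]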

def euler_to_matrix(euler: Union[Tuple[float, float, float], List[float]]) -> List[float]:
    """
    Convert Euler angles in degrees to a 3x3 rotation matrix, flattened row-major
    """
    q = glm.quat(glm.radians(glm.vec3(euler[0], euler[1], euler[2])))
    vec_list = glm.transpose(glm.mat3_cast(q)).to_list()
    return vec_list[0] + vec_list[1] + vec_list[2]
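

# Sketch: zero angles yield the flattened identity matrix.
#   euler_to_matrix((0.0, 0.0, 0.0))
#   # -> [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]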