Commit 5d1d329d authored by Nianchen Deng

sync

parent f6604bd2
9.999960660934448242e-01 4.699843702837824821e-04 -2.759433584287762642e-03 1.786941476166248322e-02 1.600000000000000000e+03
-2.758544636890292168e-03 -1.881236908957362175e-03 -9.999944567680358887e-01 1.374570187181234360e-03 1.440000000000000000e+03
-4.751728847622871399e-04 9.999981522560119629e-01 -1.879933173768222332e-03 1.649484597146511078e-02 5.601660766601562500e+02
9.999001622200012207e-01 0.000000000000000000e+00 -1.412937603890895844e-02 1.877934113144874573e-02 1.600000000000000000e+03
-1.412937697023153305e-02 0.000000000000000000e+00 -9.999002218246459961e-01 1.877934322692453861e-03 1.440000000000000000e+03
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.690140925347805023e-02 5.601660766601562500e+02
9.999960660934448242e-01 4.699843702837824821e-04 -2.759433584287762642e-03 1.786941476166248322e-02 1.600000000000000000e+03
-2.758544636890292168e-03 -1.881236908957362175e-03 -9.999944567680358887e-01 1.374570187181234360e-03 1.440000000000000000e+03
-4.751728847622871399e-04 9.999981522560119629e-01 -1.879933173768222332e-03 1.649484597146511078e-02 5.601660766601562500e+02
9.999960660934448242e-01 4.699843702837824821e-04 -2.759433584287762642e-03 1.786941476166248322e-02 1.600000000000000000e+03
-2.758544636890292168e-03 -1.881236908957362175e-03 -9.999944567680358887e-01 1.374570187181234360e-03 1.440000000000000000e+03
-4.751728847622871399e-04 9.999981522560119629e-01 -1.879933173768222332e-03 1.649484597146511078e-02 5.601660766601562500e+02
9.999960660934448242e-01 4.699843702837824821e-04 -2.759433584287762642e-03 1.786941476166248322e-02 1.600000000000000000e+03
-2.758544636890292168e-03 -1.881236908957362175e-03 -9.999944567680358887e-01 1.374570187181234360e-03 1.440000000000000000e+03
-4.751728847622871399e-04 9.999981522560119629e-01 -1.879933173768222332e-03 1.649484597146511078e-02 5.601660766601562500e+02
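
The 3x5 blocks above appear to be camera descriptions in an LLFF-like layout: each row carries one row of a 3x4 camera pose matrix plus one entry of an image-size/focal column (1600, 1440 and ~560.17 here). A minimal parsing sketch under that assumption (the [H, W, focal] ordering and the camera-to-world convention are guesses, not confirmed by this commit):

import numpy as np

def parse_pose_block(lines):
    # Parse a 3x5 block: 3x4 pose matrix plus an [H, W, focal] column (assumed layout)
    m = np.array([[float(v) for v in line.split()] for line in lines])
    assert m.shape == (3, 5)
    pose = m[:, :4]        # 3x4 [R|t] matrix (camera-to-world assumed)
    h, w, focal = m[:, 4]  # image size and focal length (ordering assumed)
    return pose, (h, w, focal)
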
import torch
import torch.nn as nn

from .modules import *
from utils import sphere
from utils import color


class BgNet(nn.Module):

    def __init__(self, fc_params, encode, c):
        """
        Initialize a background net, which infers a color from a ray's direction only
        :param fc_params: parameters for the fully-connected network
        :param encode: encode input to this number of dimensions (passed to the input encoder)
        :param c: color mode
        """
        super().__init__()
        self.color = c
        self.coord_chns = 2
        self.color_chns = color.chns(self.color)
        self.coord_encoder = InputEncoder.Get(encode, self.coord_chns)
        self.mlp = Mlp(coord_chns=self.coord_encoder.out_dim,
                       density_chns=0,
                       color_chns=self.color_chns,
                       core_nf=fc_params['nf'],
                       core_layers=fc_params['n_layers'],
                       activation=fc_params['activation'])

    def forward(self, rays_o: torch.Tensor, rays_d: torch.Tensor, debug=False) -> dict:
        """
        rays -> colors
        :param rays_o `Tensor(B, 3)`: rays' origin
        :param rays_d `Tensor(B, 3)`: rays' direction
        :return: 'color' -> `Tensor(B, C)`, inferred colors
        """
        # Encode only the angular part (theta, phi) of the ray direction
        coords_encoded = self.coord_encoder(sphere.cartesian2spherical(rays_d)[..., 1:])
        return {'color': self.mlp(coords_encoded)[0]}
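
# A minimal usage sketch (parameter values are hypothetical; `encode` is passed
# through to InputEncoder.Get and color.RGB is assumed to be a valid mode):
#
#   net = BgNet(fc_params={'nf': 128, 'n_layers': 4, 'activation': 'relu'},
#               encode=4, c=color.RGB)
#   rays_o = torch.zeros(8, 3)  # (B, 3) ray origins (unused by BgNet)
#   rays_d = torch.nn.functional.normalize(torch.randn(8, 3), dim=-1)  # (B, 3)
#   out = net(rays_o, rays_d)   # {'color': Tensor(8, color_chns)}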
import math

import torch
import torch.nn as nn
import torch.nn.functional as nn_f

from .modules import *
from utils import sphere
from utils import color


class CNerf(nn.Module):

    def __init__(self, fc_params, sampler_params,
                 c: int = color.RGB,
                 coord_encode: int = 0):
        super().__init__()
        self.color = c
        self.n_samples = sampler_params['n_samples']
        self.coord_chns = 3
        self.color_chns = color.chns(self.color)
        self.coord_encoder = InputEncoder.Get(coord_encode, self.coord_chns)
        # Separate MLPs for density and color, both fed with the encoded sample positions
        self.density_net = Mlp(coord_chns=self.coord_encoder.out_dim, density_chns=1, color_chns=0,
                               core_nf=fc_params['nf'], core_layers=fc_params['n_layers'])
        self.color_net = Mlp(coord_chns=self.coord_encoder.out_dim, density_chns=0, color_chns=self.color_chns,
                             core_nf=fc_params['nf'], core_layers=fc_params['n_layers'])
        self.sampler = Sampler(**sampler_params)
        self.rendering = NewRendering()

    def forward(self, rays_o: torch.Tensor, rays_d: torch.Tensor, ret_depth=False, debug=False) -> torch.Tensor:
        """
        rays -> colors
        :param rays_o `Tensor(B, 3)`: rays' origin
        :param rays_d `Tensor(B, 3)`: rays' direction
        :return: `Tensor(B, C)`, inferred images/pixels
        """
        coords, pts, depths = self.sampler(rays_o, rays_d)
        encoded_position = self.coord_encoder(coords)
        densities = self.density_net(encoded_position)[1]
        colors = self.color_net(encoded_position)[0]
        return self.rendering(colors, densities[..., 0], depths, ret_depth=ret_depth, debug=debug)
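
# A minimal usage sketch (hyper-parameter values are hypothetical and the exact
# keys of sampler_params depend on the Sampler class in .modules):
#
#   net = CNerf(fc_params={'nf': 256, 'n_layers': 8},
#               sampler_params={'n_samples': 64},  # plus any Sampler-specific keys
#               coord_encode=10)
#   colors = net(rays_o, rays_d)  # Tensor(B, color_chns), volume-rendered along each ray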
import math

import torch
import torch.nn as nn
import torch.nn.functional as nn_f

from .modules import *
from utils import sphere
from utils import color


class CNerf(nn.Module):

    def __init__(self, fc_params, sampler_params,
                 c: int = color.RGB,
                 coord_encode: int = 0):
        super().__init__()
        self.color = c
        self.n_samples = sampler_params['n_samples']
        self.coord_chns = 3
        self.color_chns = color.chns(self.color)
        self.coord_encoder = InputEncoder.Get(coord_encode, self.coord_chns)
        self.density_net = Mlp(coord_chns=self.coord_encoder.out_dim, density_chns=1, color_chns=0,
                               core_nf=fc_params['nf'], core_layers=fc_params['n_layers'])
        # The color net sees all samples of a ray at once: its input and output are
        # the per-sample features and colors flattened along the sample axis
        self.color_net = Mlp(coord_chns=self.coord_encoder.out_dim * self.n_samples,
                             density_chns=0, color_chns=self.color_chns * self.n_samples,
                             core_nf=fc_params['nf'], core_layers=fc_params['n_layers'])
        self.sampler = Sampler(**sampler_params)
        self.rendering = NewRendering()

    def forward(self, rays_o: torch.Tensor, rays_d: torch.Tensor,
                ret_depth=False, debug=False) -> torch.Tensor:
        """
        rays -> colors
        :param rays_o `Tensor(B, 3)`: rays' origin
        :param rays_d `Tensor(B, 3)`: rays' direction
        :return: `Tensor(B, C)`, inferred images/pixels
        """
        coords, pts, depths = self.sampler(rays_o, rays_d)
        encoded_position = self.coord_encoder(coords)
        densities = self.density_net(encoded_position)[1][..., 0]
        colors = self.color_net(encoded_position.flatten(1, 2))[0] \
            .view(-1, self.n_samples, self.color_chns)
        return self.rendering(colors, densities, depths, ret_depth=ret_depth, debug=debug)
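
# Shape walkthrough for the flattened color net above (illustrative numbers):
# with B=1024 rays, n_samples=64 and coord_encoder.out_dim=60, encoded_position
# is (1024, 64, 60); flatten(1, 2) gives (1024, 3840); the color net then emits
# (1024, 64 * color_chns) and view() restores (1024, 64, color_chns). Unlike the
# per-sample variant above, every sample's color can depend on all samples of
# the ray.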
import math
from itertools import product

import torch
import torch.nn as nn
import torch.nn.functional as nn_f

from .modules import *
from utils import sphere
from utils import color

'''
The first step towards depth-guided acceleration:
sample according to the raw depth input
'''


class CNerf(nn.Module):

    def __init__(self, fc_params, sampler_params,
                 c: int = color.RGB,
                 coord_encode: int = 0,
                 n_bins: int = 128):
        super().__init__()
        self.color = c
        self.n_samples = sampler_params['n_samples']
        self.n_bins = n_bins
        self.coord_chns = 3
        self.color_chns = color.chns(self.color)
        self.coord_encoder = InputEncoder.Get(coord_encode, self.coord_chns)
        self.mlp = NewMlp(coord_chns=self.coord_encoder.out_dim,
                          density_chns=1,
                          color_chns=self.color_chns,
                          core_nf=fc_params['nf'],
                          core_layers=fc_params['n_layers'],
                          activation=fc_params['activation'],
                          skips=fc_params['skips'])
        self.sampler = PdfSampler(**sampler_params, n_bins=n_bins)
        self.rendering = NewRendering()

    def set_depth_maps(self, rays_o, rays_d, depthmaps):
        """
        Convert raw depth maps to per-ray bin weights which guide the PDF sampler
        :param rays_o `Tensor(B, H, W, 3)`: rays' origin
        :param rays_d `Tensor(B, H, W, 3)`: rays' direction
        :param depthmaps `Tensor(B, H, W)`: raw depth of every ray
        """
        with torch.no_grad():
            # Radial distance of every ray's depth point, used to locate its sample bin
            radius_maps = sphere.cartesian2spherical(rays_o + rays_d * depthmaps[..., None],
                                                     inverse_r=self.sampler.lindisp)[..., 0]
            bin_ids = torch.floor((radius_maps - self.sampler.s_range[0]) /
                                  (self.sampler.s_range[1] - self.sampler.s_range[0]) * self.n_bins)
            bin_ids = bin_ids.clamp(0, self.n_bins - 1).to(torch.long)[..., None]
            k = 3
            self.bin_weights = torch.zeros_like(bin_ids.expand(-1, -1, -1, self.n_bins),
                                                dtype=torch.int8)  # (B, H, W, N)
            # 10 views per batch to keep the memory cost low enough
            batch_size = 10
            temp_weights = torch.empty_like(self.bin_weights[:batch_size])  # (B', H, W, N)
            for offset in range(0, bin_ids.size(0), batch_size):
                bidx = slice(offset, min(offset + batch_size, bin_ids.size(0)))
                idx = slice(0, bidx.stop - bidx.start)
                temp_weights.fill_(0)
                # Splat every pixel's bin into its 5x5 neighborhood, weighted by the
                # distance to the center, so that nearby rays also sample that bin
                for i, j in product(range(-2, 3), range(-2, 3)):
                    w = int(10 * (1 - math.sqrt(0.5 * (i * i + j * j)) / 3))
                    src_sy = slice(-j) if j < 0 else slice(j, None)
                    src_sx = slice(-i) if i < 0 else slice(i, None)
                    dst_sy = slice(-j) if j > 0 else slice(j, None)
                    dst_sx = slice(-i) if i > 0 else slice(i, None)
                    bin_ids_subview = bin_ids[bidx, src_sy, src_sx]
                    weights_subview = temp_weights[idx, dst_sy, dst_sx]
                    weights_subview.scatter_(-1, bin_ids_subview,
                                             weights_subview.gather(-1, bin_ids_subview).clamp_min(w))
                # Only keep the top-k bins of every ray
                _, bin_idxs = torch.topk(temp_weights[idx], k)  # (B', H, W, k)
                self.bin_weights[bidx].scatter_(-1, bin_idxs, 1)

    def forward(self, rays_o: torch.Tensor, rays_d: torch.Tensor, rays_weights: torch.Tensor,
                ret_depth=False, debug=False) -> torch.Tensor:
        """
        rays -> colors
        :param rays_o `Tensor(B, 3)`: rays' origin
        :param rays_d `Tensor(B, 3)`: rays' direction
        :param rays_weights `Tensor(B, N)`: rays' bin weights (see set_depth_maps)
        :return: `Tensor(B, C)`, inferred images/pixels
        """
        coords, pts, depths, _ = self.sampler(rays_o, rays_d, rays_weights)
        encoded_position = self.coord_encoder(coords)
        colors, densities = self.mlp(encoded_position)
        return self.rendering(colors, densities[..., 0], depths,
                              ret_depth=ret_depth, debug=debug)
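
# A minimal usage sketch (hyper-parameter values are hypothetical; fc_params
# additionally needs the 'activation' and 'skips' keys consumed by NewMlp):
#
#   net = CNerf(fc_params={'nf': 256, 'n_layers': 8, 'activation': 'relu', 'skips': [4]},
#               sampler_params={'n_samples': 16},  # plus any PdfSampler-specific keys
#               coord_encode=10, n_bins=128)
#   net.set_depth_maps(view_rays_o, view_rays_d, raw_depthmaps)  # (B, H, W, 3) / (B, H, W)
#   # at render time, look up each ray's weights in net.bin_weights, then:
#   colors = net(rays_o, rays_d, rays_weights)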