Commit 338ae906 authored by Nianchen Deng's avatar Nianchen Deng

tog'21 baseline

parent f1dd9e3a
@@ -13,18 +13,94 @@ Or ref to https://pytorch.org/get-started/locally/ for install guide
* tensorboard
* (optional) dash
```
$ conda install dash pandas
```
## 2. Install Pip packages:
* pyGlm
* tensorboardX
* (optional) opencv-python
* (optional) thop
* (optional) ConcurrentLogHandler
# Useful commands
## 1. Generate video:
```
$ ffmpeg -y -r 50 -i %04d.png -c:v libx264 -vframes 600 ../classroom_hmd_mono_hint.mp4
```
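If the resulting file does not open in some players, adding `-pix_fmt yuv420p` (used in earlier versions of this command) is the usual fix:
```
$ ffmpeg -y -r 50 -i %04d.png -c:v libx264 -vframes 600 -pix_fmt yuv420p ../classroom_hmd_mono_hint.mp4
```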
## 2. Convert ONNX to TensorRT
```
$ trtexec --onnx=net@256x256x2.onnx --fp16 --saveEngine=net@256x256x2.trt --workspace=4096
```
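To sanity-check the exported engine, `trtexec` can reload and benchmark it directly (engine name taken from the command above):
```
$ trtexec --loadEngine=net@256x256x2.trt
```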
# Install FFmpeg with Extra Codecs:
```
sudo apt-get update -qq && sudo apt-get -y install \
autoconf \
automake \
build-essential \
cmake \
git-core \
libass-dev \
libfreetype6-dev \
libgnutls28-dev \
libsdl2-dev \
libtool \
libva-dev \
libvdpau-dev \
libvorbis-dev \
libxcb1-dev \
libxcb-shm0-dev \
libxcb-xfixes0-dev \
meson \
ninja-build \
pkg-config \
texinfo \
wget \
yasm \
zlib1g-dev \
libunistring-dev \
libvpx-dev \
libfdk-aac-dev \
libmp3lame-dev \
libopus-dev \
nasm \
libx264-dev \
libx265-dev \
libnuma-dev
mkdir -p ~/ffmpeg_sources ~/bin
cd ~/ffmpeg_sources && \
wget -O ffmpeg-snapshot.tar.bz2 https://ffmpeg.org/releases/ffmpeg-snapshot.tar.bz2 && \
tar xjvf ffmpeg-snapshot.tar.bz2
cd ffmpeg && \
PATH="$HOME/bin:$PATH" PKG_CONFIG_PATH="$HOME/ffmpeg_build/lib/pkgconfig" ./configure \
--prefix="$HOME/ffmpeg_build" \
--pkg-config-flags="--static" \
--extra-cflags="-I$HOME/ffmpeg_build/include" \
--extra-ldflags="-L$HOME/ffmpeg_build/lib" \
--extra-libs="-lpthread -lm" \
--bindir="$HOME/bin" \
--enable-gpl \
--enable-gnutls \
--enable-libass \
--enable-libfdk-aac \
--enable-libfreetype \
--enable-libmp3lame \
--enable-libopus \
--enable-libvorbis \
--enable-libvpx \
--enable-libx264 \
--enable-libx265 \
--enable-nonfree && \
PATH="$HOME/bin:$PATH" make && \
make install && \
hash -r
```
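To confirm the freshly built binary is the one on PATH and includes the codecs enabled above (paths follow the script):
```
$ export PATH="$HOME/bin:$PATH" && hash -r
$ ffmpeg -version | head -n 1
$ ffmpeg -hide_banner -encoders | grep -E 'libx264|libx265|libfdk_aac'
```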
.slider {
margin-top: 30px
}
#!/usr/bin/bash
curdir=$(pwd)
datadir="$curdir/data/__new/classroom_fovea_r360x80_t0.6"
videodir="$datadir/eval_video"
epochs=50
if [ ! -d "$videodir" ]; then
    echo "make directory for video"
    mkdir "$videodir"
fi
# nets: 1, 2, 4, 8
# layers: 2, 4, 8
# channels: 64 128 256 512 1024
for n_nets in 1 2 4 8; do
    for n_layers in 2 4 8; do
        for nf in 64 128 256 512 1024; do
            for n_samples in 8 16 32 64 128; do
                configid="eval@snerffast${n_nets}-rgb_e6_fc${nf}x${n_layers}_d1.00-7.00_s${n_samples}_~p"
                exportname="eval_${n_nets}x${nf}x${n_layers}_${n_samples}"
                src_path="../$configid/output_$epochs/helix_color.mp4"
                dst_path="$videodir/$exportname.mp4"
                if [ -f "$videodir/$src_path" ]; then
                    if [ ! -f "$dst_path" ]; then
                        ln -s "$src_path" "$dst_path"
                    fi
                fi
            done
        done
    done
done
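# Note: ln -s stores the target string verbatim, and the OS resolves it
# relative to the directory containing the link; that is why the existence
# check above prepends $videodir to the relative $src_path. After a
# successful run, the smallest sweep point should resolve like this:
#   $ readlink "$videodir/eval_1x64x2_8.mp4"
#   ../eval@snerffast1-rgb_e6_fc64x2_d1.00-7.00_s8_~p/output_50/helix_color.mp4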
@@ -21,22 +21,28 @@ fi
for n_nets in 1 2 4 8; do
    for n_layers in 2 4 8; do
        for nf in 64 128 256 512 1024; do
            for n_samples in 8 16 32 64 128; do
                configid="eval@snerffast${n_nets}-rgb_e6_fc${nf}x${n_layers}_d1.00-7.00_s${n_samples}_~p"
                if (( $n_samples == 64 )); then
                    exportname="eval_${n_nets}x${nf}x${n_layers}"
                else
                    exportname="eval_${n_nets}x${nf}x${n_layers}_${n_samples}"
                fi
                pth_path="$datadir/$configid/model-epoch_$epochs.pth"
                onnx_path="$onnxdir/$exportname.onnx"
                trt_path="$trtdir/$exportname.trt"
                time_perf_path="$trtdir/time/$exportname.json"
                if [ -f "$pth_path" ]; then
                    if [ ! -f "$onnx_path" ]; then
                        # Export ONNX model
                        python tools/export_snerf_fast.py $pth_path -b 65536 -o $onnx_path
                    fi
                    if [ ! -f "$trt_path" ]; then
                        # Export TensorRT engine
                        trtexec --onnx=$onnx_path --fp16 --saveEngine=$trt_path --workspace=4096 --exportTimes=$time_perf_path --noDataTransfers
                    fi
                fi
            done
        done
    done
done
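# For concreteness, the smallest sweep point (n_nets=1, n_layers=2, nf=64,
# n_samples=8) expands the templates above to:
#   configid   = eval@snerffast1-rgb_e6_fc64x2_d1.00-7.00_s8_~p
#   exportname = eval_1x64x2_8
#   onnx_path  = $onnxdir/eval_1x64x2_8.onnx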
@@ -4,6 +4,7 @@ testcase=$1
datadir='data/__new/classroom_fovea_r360x80_t0.6'
trainset='data/__new/classroom_fovea_r360x80_t0.6/r120x80.json'
testset='data/__new/classroom_fovea_r360x80_t0.6/r120x80_test.json'
videoset='data/__new/classroom_fovea_r360x80_t0.6/helix.json'
epochs=50
# nets: 1, 2, 4, 8
@@ -30,8 +31,14 @@ for nf in 64 128 256 512 1024; do
            python run_spherical_view_syn.py $trainset -i $configid -e $epochs
        fi
    fi
    if ! ls $datadir/$configid/output_$epochs/perf* >/dev/null 2>&1; then
        python run_spherical_view_syn.py $trainset -t -m $configid/model-epoch_$epochs.pth -o perf
        python run_spherical_view_syn.py $testset -t -m $configid/model-epoch_$epochs.pth -o perf
    fi
    if [ ! -d "$datadir/$configid/output_$epochs/r120x80_test_color" ]; then
        python run_spherical_view_syn.py $testset -t -m $configid/model-epoch_$epochs.pth -o color
    fi
    if [ ! -f "$datadir/$configid/output_$epochs/helix_color.mp4" ]; then
        python run_spherical_view_syn.py $videoset -t -m $configid/model-epoch_$epochs.pth -o color --output-type video
    fi
done
@@ -50,6 +50,7 @@ class FoveatedNeuralRenderer(object):
    def render(self, view: Trans, gaze, right_gaze=None, *,
               stereo_disparity=0,
               using_mask=True,
               mono_periph_mode=0,
               ret_raw=False) -> Union[Mapping[str, torch.Tensor], Tuple[Mapping[str, torch.Tensor]]]:
        if stereo_disparity > TINY_FLOAT:
            left_view = Trans(
@@ -61,24 +62,50 @@ class FoveatedNeuralRenderer(object):
            left_gaze = gaze
            right_gaze = gaze if right_gaze is None else right_gaze
            layers_mask = self.foveation.get_layers_mask() if using_mask else [None] * 3
            left_shifts = None
            right_shifts = None

            if using_mask and mono_periph_mode != 0:
                # Fovea layers are always rendered separately for each eye
                fovea_left = self._render(self.layers_net[0], self.layers_cam[0], left_view, left_gaze,
                                          layer_mask=layers_mask[0])['color']
                fovea_right = self._render(self.layers_net[0], self.layers_cam[0], right_view, right_gaze,
                                           layer_mask=layers_mask[0])['color']
                if mono_periph_mode == 3:
                    # Mode 3: render mid and periph layers once from the central view;
                    # the shared periphery is later shifted horizontally per eye
                    mid = self._render(self.layers_net[1], self.layers_cam[1], view,
                                       ((left_gaze[0] + right_gaze[0]) // 2, left_gaze[1]),
                                       layer_mask=layers_mask[1])['color']
                    periph = self._render(self.layers_net[2], self.layers_cam[2], view)['color']
                    raw_left = [fovea_left, mid, periph]
                    raw_right = [fovea_right, mid, periph]
                    shift = int(left_gaze[0] - right_gaze[0]) // 2
                    left_shifts = [0, 0, shift]
                    right_shifts = [0, 0, -shift]
                else:
                    # Modes 1/2: per-eye mid layers whose ray origins converge to the
                    # central view; mode 1 blends the origins across the transition region
                    mid_left = self._render_mid(self.layers_net[1], self.layers_cam[1], left_view, left_gaze,
                                                layer_mask=layers_mask[1], mono_view=view,
                                                blend_view=mono_periph_mode == 1)['color']
                    mid_right = self._render_mid(self.layers_net[1], self.layers_cam[1], right_view, right_gaze,
                                                 layer_mask=layers_mask[1], mono_view=view,
                                                 blend_view=mono_periph_mode == 1)['color']
                    periph = self._render(self.layers_net[2], self.layers_cam[2], view)['color']
                    raw_left = [fovea_left, mid_left, periph]
                    raw_right = [fovea_right, mid_right, periph]
            else:
                raw_left = [
                    self._render(self.layers_net[i], self.layers_cam[i], left_view,
                                 left_gaze if i < 2 else None,
                                 layer_mask=layers_mask[i])['color']
                    for i in range(3)
                ]
                raw_right = [
                    self._render(self.layers_net[i], self.layers_cam[i], right_view,
                                 right_gaze if i < 2 else None,
                                 layer_mask=layers_mask[i])['color']
                    for i in range(3)
                ]
            return self._gen_output(raw_left, left_gaze, left_shifts, ret_raw=ret_raw), \
                self._gen_output(raw_right, right_gaze, right_shifts, ret_raw=ret_raw)
        else:
            layers_mask = self.foveation.get_layers_mask(gaze) if using_mask else None
            res_raw = [
@@ -86,34 +113,7 @@ class FoveatedNeuralRenderer(object):
                             layer_mask=layers_mask[i] if layers_mask is not None else None)['color']
                for i in range(3)
            ]
            return self._gen_output(res_raw, gaze, ret_raw=ret_raw)
'''
if mono_trans != None and shift == 0: # do warp
fovea_depth[torch.isnan(fovea_depth)] = 50
mid_depth[torch.isnan(mid_depth)] = 50
periph_depth[torch.isnan(periph_depth)] = 50
if warp_by_depth:
z_list = misc.depth_sample((1, 50), 4, True)
mid_inferred = self._warp(trans, mono_trans, mid_cam,
z_list, mid_inferred, mid_depth)
periph_inferred = self._warp(trans, mono_trans, periph_cam,
z_list, periph_inferred, periph_depth)
else:
p = torch.tensor([[0, 0, torch.mean(fovea_depth)]],
device=self.device)
p_ = trans.trans_point(mono_trans.trans_point(p), inverse=True)
shift = self.full_cam.proj(
p_, center_as_origin=True)[..., 0].item()
shift = round(shift)
blended = self.foveation.synthesis([
fovea_refined,
mid_refined,
periph_refined
], (gaze[0], gaze[1]), [0, shift, shift] if shift != 0 else None)
'''
    def _render(self, net, cam: CameraParam, view: Trans, gaze=None, *,
                ret_depth=False,
@@ -142,9 +142,54 @@ class FoveatedNeuralRenderer(object):
            'depth': net_output['depth'].view(1, cam.res[0], cam.res[1]) if ret_depth else None
        }
    def _render_mid(self, net, cam: CameraParam, view: Trans, gaze=None, *,
                    layer_mask: torch.Tensor,
                    mono_view: Trans,
                    blend_view: bool,
                    ret_depth=False) -> Mapping[str, torch.Tensor]:
        """
        Render the mid layer with ray origins pulled toward the mono (central) view.

        The layer mask steers the interpolation: where its value is high, rays
        originate from `mono_view`; elsewhere from the per-eye `view`. With
        `blend_view` enabled, the origins are blended smoothly across the
        transition region; otherwise the mono origin is used throughout.

        :param net: network rendering this layer
        :param cam: camera parameters of this layer
        :param view: per-eye view transform
        :param layer_mask: mask steering the view interpolation and the inferred region
        :param mono_view: central (mono) view transform
        :param blend_view: whether to blend ray origins across the transition region
        :param gaze: gaze position used to re-center the camera, defaults to None
        :param ret_depth: whether to also return a depth map, defaults to False
        :return: a dict with 'color' (and 'depth' if requested)
        """
        if gaze is not None:
            cam = self._adjust_cam(cam, gaze)
        k = layer_mask[None, ..., None].clamp(1 if blend_view else 2, 2) - 1  # (1, H, W, 1)
        rays_o = (1 - k) * view.t + k * mono_view.t  # (1, H, W, 3)
        rays_d = view.trans_vector(cam.get_local_rays())  # (1, H, W, 3)
        if layer_mask is not None:
            infer_mask = layer_mask >= 0
            rays_o = rays_o[:, infer_mask]
            rays_d = rays_d[:, infer_mask]
            net_output = net(rays_o.view(-1, 3), rays_d.view(-1, 3), ret_depth=ret_depth)
            ret = {
                'color': torch.zeros(1, cam.res[0], cam.res[1], 3, device=self.device)
            }
            ret['color'][:, infer_mask] = net_output['color']
            ret['color'] = ret['color'].permute(0, 3, 1, 2)
            if ret_depth:
                ret['depth'] = torch.zeros(1, cam.res[0], cam.res[1], device=self.device)
                ret['depth'][:, infer_mask] = net_output['depth']
            return ret
        else:
            net_output = net(rays_o.view(-1, 3), rays_d.view(-1, 3), ret_depth=ret_depth)
            return {
                'color': net_output['color'].view(1, cam.res[0], cam.res[1], -1).permute(0, 3, 1, 2),
                'depth': net_output['depth'].view(1, cam.res[0], cam.res[1]) if ret_depth else None
            }
    def _gen_output(self, layers_img: List[torch.Tensor], gaze: Tuple[float, float], shifts=None, ret_raw=False) -> Mapping[str, torch.Tensor]:
        refined = self._post_process(layers_img)
        blended = self.foveation.synthesis(refined, gaze, shifts)
        ret = {
            'layers_img': refined,
            'blended': blended
...
@@ -10,7 +10,7 @@ from utils import misc
class Foveation(object):
    def __init__(self, layers_fov: List[float], layers_res: List[Tuple[float, float]],
                 out_res: Tuple[int, int], *, blend: float = 0.6, device: torch.device = None):
        self.layers_fov = layers_fov
        self.layers_res = layers_res
        self.out_res = out_res
@@ -23,15 +23,15 @@ class Foveation(object):
        ]  # blend maps of fovea layers
        self.coords = misc.meshgrid(*out_res).to(device=device)

    def to(self, device: torch.device):
        self.eye_fovea_blend = [x.to(device=device) for x in self.eye_fovea_blend]
        self.coords = self.coords.to(device=device)
        return self
    def synthesis(self, layers: List[torch.Tensor], fovea_center: Tuple[float, float],
                  shifts: List[int] = None,
                  do_blend: bool = True,
                  crop_mode: bool = False) -> torch.Tensor:
        """
        Generate the foveated retinal image by blending fovea layers
        **Note: the current implementation only supports two fovea layers**
@@ -41,26 +41,32 @@ class Foveation(object):
        """
        output: torch.Tensor = nn_f.interpolate(layers[-1], self.out_res,
                                                mode='bilinear', align_corners=False)
        if shifts is not None:
            output = img.horizontal_shift(output, shifts[-1])
        c = torch.tensor([
            fovea_center[0] + (self.out_res[1] - 1) / 2,
            fovea_center[1] + (self.out_res[0] - 1) / 2
        ], device=self.device)
        for i in range(self.n_layers - 2, -1, -1):
            if layers[i] is None:
                continue
            R = self.get_layer_size_in_final_image(i) / 2
            grid = ((self.coords - c) / R)[None, ...]
            if shifts is not None:
                grid = img.horizontal_shift(grid, shifts[i], -2)
            # (1, 1, H:out, W:out)
            if do_blend:
                blend = nn_f.grid_sample(self.eye_fovea_blend[i][None, None], grid,
                                         align_corners=False)
            else:
                blend = nn_f.grid_sample(torch.ones_like(self.eye_fovea_blend[i][None, None]), grid,
                                         align_corners=False)
            output.mul_(1 - blend)
            if crop_mode:
                output.add_(blend * nn_f.interpolate(layers[i], self.out_res, mode='bilinear',
                                                     align_corners=False))
            else:
                output.add_(blend * nn_f.grid_sample(layers[i], grid, align_corners=False))
        return output
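    # Per pixel, each pass of the loop above applies a convex blend of the
    # current output with layer i resampled onto the output grid:
    #     output <- (1 - w_i) * output + w_i * L_i
    # where w_i is the fovea blend map sampled at `grid` (all ones when
    # do_blend is False) and L_i comes from grid_sample, or from plain
    # interpolation in crop_mode.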
    def get_layer_size_in_final_image(self, i: int) -> int:
@@ -98,7 +104,7 @@ class Foveation(object):
        r = torch.norm(p - R, dim=2)  # (size, size)
        return misc.smooth_step(R, R * self.blend, r)
    def get_layers_mask(self, gaze=None) -> List[torch.Tensor]:
        """
        Generate mask images for layers[:-1]
        The meaning of values in the mask images:
@@ -111,15 +117,19 @@ class Foveation(object):
        """
        layers_mask = []
        for i in range(self.n_layers):
            if i == self.n_layers - 1:
                if gaze is None:
                    layers_mask.append(torch.ones(*self.layers_res[i], device=self.device))
                    continue
                c = torch.tensor([
                    (gaze[0] + 0.5 * self.out_res[1]) / self.out_res[0],
                    (gaze[1] + 0.5 * self.out_res[0]) / self.out_res[0]
                ], device=self.device)
            else:
                c = torch.tensor([0.5, 0.5], device=self.device)
            layers_mask.append(torch.ones(*self.layers_res[i], device=self.device) * -1)
            coord = misc.meshgrid(
                *self.layers_res[i]).to(device=self.device) / self.layers_res[i][0]
            r = 2 * torch.norm(coord - c, dim=-1)
            inner_radius = self.get_source_layer_cover_size_in_target_layer(
                self.layers_fov[i - 1], self.layers_fov[i], self.layers_res[i][0]) / self.layers_res[i][0] \
@@ -129,7 +139,7 @@ class Foveation(object):
            else:
                bounds = [inner_radius * (1 - self.blend), inner_radius, self.blend, 1]
            for bi in range(len(bounds) - 1):
                region = torch.logical_and(r >= bounds[bi], r < bounds[bi + 1])
                layers_mask[i][region] = bi + \
                    (r[region] - bounds[bi]) / (bounds[bi + 1] - bounds[bi])
        return layers_mask
def update_config(config):
    # Net parameters
    config.net = 'bgnet'
    config.n_pos_encode = 10
    config.fc.update({
        'nf': 128,
        'n_layers': 4
    })
def update_config(config):
    # Net parameters
    config.net = 'cnerf128'
    config.n_pos_encode = 10
    config.depth_ref = True
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (0.4, 6),
        'n_samples': 8,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'dnerfa'
    config.n_pos_encode = 10
    config.depth_ref = True
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (0.4, 6),
        'n_samples': 8,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'snerffast4'
    config.n_pos_encode = 6
    #config.n_dir_encode = 4
    config.fc.update({
        'nf': 512,
        'n_layers': 4
    })
    config.sa.update({
        'sample_range': (1, 50),
        'n_samples': 64,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'snerffast4'
    config.n_pos_encode = 6
    #config.n_dir_encode = 4
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (1, 50),
        'n_samples': 64,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'snerffast4'
    config.n_pos_encode = 6
    #config.n_dir_encode = 4
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (1, 50),
        'n_samples': 64,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'msl2fast'
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (0.5, 5),
        'n_samples': 128,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'msl'
    config.fc.update({
        'nf': 128,
        'n_layers': 4
    })
    config.sa.update({
        'sample_range': (1, 50),
        'n_samples': 32
    })
def update_config(config):
    # Net parameters
    config.net = 'mslfast'
    config.n_pos_encode = 6
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (0.5, 5),
        'n_samples': 128,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'mslray'
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (0.5, 5),
        'n_samples': 128,
        'perturb_sample': False
    })
def update_config(config):
    # Net parameters
    config.net = 'nerf'
    config.n_pos_encode = 10
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (1, 50),
        'n_samples': 64,
        'perturb_sample': True
    })
@@ -15,5 +15,5 @@ def update_config(config):
        'enable': True,
        'nf': 256,
        'n_layers': 8,
        'n_samples': 128
    })
def update_config(config):
    # Net parameters
    config.net = 'snerffast4'
    config.n_pos_encode = 6
    #config.n_dir_encode = 4
    config.fc.update({
        'nf': 256,
        'n_layers': 8
    })
    config.sa.update({
        'sample_range': (1, 50),
        'n_samples': 64,
        'perturb_sample': False
    })
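# A minimal sketch of the config object these update_config hooks assume
# (attribute and key names inferred from the snippets above; the repo's
# actual config class may differ):
class Config:
    def __init__(self):
        self.net = 'nerf'             # network type identifier
        self.n_pos_encode = 10        # positional-encoding frequencies
        self.depth_ref = False        # depth-refinement toggle
        self.fc = {'nf': 256, 'n_layers': 8}   # MLP width / depth
        self.sa = {'sample_range': (1, 50),    # ray-sampling settings
                   'n_samples': 64,
                   'perturb_sample': False}

config = Config()
update_config(config)  # apply any one of the hooks above to override the defaults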
nerf_horns.py