Commit 2a1d7973 authored by Nianchen Deng's avatar Nianchen Deng

For batch inference evaluation on multiple hosts

parent 7e0ade21
......@@ -10,6 +10,6 @@
"__config": "cpp",
"__nullptr": "cpp"
},
"python.pythonPath": "/home/dengnc/miniconda3/envs/pytorch/bin/python",
"python.pythonPath": "/home/dengnc/miniconda3/bin/python",
"jupyter.jupyterServerType": "local"
}
\ No newline at end of file
# deep_view_syn
# Configure environment
## 1. Install Conda packages:
Requirement:
* Pytorch 1.8.1 with CUDA
```
$ conda install pytorch torchvision torchaudio cudatoolkit=<your cuda version> -c pytorch -c nvidia
```
Or refer to https://pytorch.org/get-started/locally/ for an install guide.
* matplotlib
* tensorboard
## 2. Install Pip packages:
* pyGlm
* tensorboardX
* (optional) opencv-python
# Useful commands
## 1. Video generate:
```
$ ffmpeg -y -r 30 -i view_%04d.png -c:v libx264 -vf fps=30 -pix_fmt yuv420p ../train.mp4
```
Run training:
python main_lf_syn.py
DATA_FILE = "/home/yejiannan/Project/LightField/data/lf_syn"
DATA_JSON = "/home/yejiannan/Project/LightField/data/data_lf_syn_full.json"
OUTPUT_DIR = "/home/yejiannan/Project/LightField/outputE/lf_syn_full"
Reconfigure the data locations here before running.
## 2. Convert onnx to tensorRT
```
$ trtexec --onnx=net@256x256x2.onnx --fp16 --saveEngine=net@256x256x2.trt --workspace=4096
```
\ No newline at end of file
#!/usr/bin/bash
testcase=$1
dataset='data/gas_fovea_r90x30_t0.3_2021.01.11'
datadir='data/__new/classroom_fovea_r360x80_t0.6'
trainset='data/__new/classroom_fovea_r360x80_t0.6/r120x80.json'
testset='data/__new/classroom_fovea_r360x80_t0.6/r120x80_test.json'
epochs=50
# layers: 4, 8
# samples: 4, 16, 64
# channels: 64 128 256
x=0
nf_arr=($x 128 256 256)
n_layers_arr=($x 8 4 8)
n_samples=32
nf=${nf_arr[$testcase]}
# nets: 1, 2, 4, 8
# layers: 2, 4, 8
# channels: 128 256 512
n_nets_arr=(1 2 4 8 1 2 4 8 1 2 4 8)
n_layers_arr=(2 2 2 2 4 4 4 4 8 8 8 8)
n_nets=${n_nets_arr[$testcase]}
n_layers=${n_layers_arr[$testcase]}
#for n_layers in 4 8; do
# for nf in 64 128 256; do
# for n_samples in 4 16 64; do
configid="infer_test@msl-rgb_e10_fc${nf}x${n_layers}_d1.00-50.00_s${n_samples}"
python run_spherical_view_syn.py --dataset $dataset/train.json --config-id $configid --device $testcase --epochs $epochs
python run_spherical_view_syn.py --dataset $dataset/train.json --test $dataset/$configid/model-epoch_$epochs.pth --perf --device $testcase
python run_spherical_view_syn.py --dataset $dataset/test.json --test $dataset/$configid/model-epoch_$epochs.pth --perf --device $testcase
# done
# done
#done
for nf in 64 128 256 512 1024; do
    configid="eval@snerffast${n_nets}-rgb_e6_fc${nf}x${n_layers}_d1.00-7.00_s64_~p"
    if [ -f "$datadir/$configid/model-epoch_$epochs.pth" ]; then
        continue
    fi
    cont_epoch=0
    for ((i=$epochs-1; i>0; i--)); do
        if [ -f "$datadir/$configid/model-epoch_$i.pth" ]; then
            cont_epoch=$i
            break
        fi
    done
    if [ ${cont_epoch} -gt 0 ]; then
        python run_spherical_view_syn.py $trainset -e $epochs -m $configid/model-epoch_${cont_epoch}.pth
    else
        python run_spherical_view_syn.py $trainset -i $configid -e $epochs
    fi
    python run_spherical_view_syn.py $trainset -t -m $configid/model-epoch_$epochs.pth -o perf
    python run_spherical_view_syn.py $testset -t -m $configid/model-epoch_$epochs.pth -o perf
done
......@@ -2,24 +2,24 @@ import torch
import torch.nn.functional as nn_f
from typing import Any, List, Mapping, Tuple
from torch import nn
from utils import view
from utils import misc
from utils.perf import Perf
from . import refine
from utils.view import *
from utils.constants import *
from .post_process import *
from .foveation import Foveation
class GenFinal(object):
class FoveatedNeuralRenderer(object):
def __init__(self, layers_fov: List[float],
layers_res: List[Tuple[int, int]],
full_res: Tuple[int, int],
fovea_net: nn.Module,
periph_net: nn.Module,
device: torch.device = None) -> None:
layers_net: nn.ModuleList,
output_res: Tuple[int, int], *,
using_mask=True,
device: torch.device = None):
super().__init__()
self.layer_cams = [
view.CameraParam({
self.layers_net = layers_net.to(device=device)
self.layers_cam = [
CameraParam({
'fov': layers_fov[i],
'cx': 0.5,
'cy': 0.5,
......@@ -27,68 +27,58 @@ class GenFinal(object):
}, layers_res[i], device=device)
for i in range(len(layers_fov))
]
self.full_cam = view.CameraParam({
self.cam = CameraParam({
'fov': layers_fov[-1],
'cx': 0.5,
'cy': 0.5,
'normalized': True
}, full_res, device=device)
self.fovea_net = fovea_net.to(device)
self.periph_net = periph_net.to(device)
self.foveation = Foveation(layers_fov, full_res, device=device)
}, output_res, device=device)
self.foveation = Foveation(layers_fov, layers_res, output_res, device=device)
self.layers_mask = self.foveation.get_layers_mask() if using_mask else None
self.device = device
def to(self, device: torch.device):
self.fovea_net.to(device)
self.periph_net.to(device)
self.layers_net.to(device)
self.foveation.to(device)
self.full_cam.to(device)
for cam in self.layer_cams:
self.cam.to(device)
for cam in self.layers_cam:
cam.to(device)
if self.layers_mask is not None:
self.layers_mask = self.layers_mask.to(device)
self.device = device
return self
def __call__(self, *args: Any, **kwds: Any) -> Any:
return self.gen(*args, **kwds)
def gen(self, gaze, trans: view.Trans, *,
mono_trans: view.Trans = None,
shift: int = 0,
warp_by_depth: bool = False,
ret_raw=False,
perf_time=False) -> Mapping[str, torch.Tensor]:
fovea_cam = self._adjust_cam(self.layer_cams[0], self.full_cam, gaze)
mid_cam = self._adjust_cam(self.layer_cams[1], self.full_cam, gaze)
periph_cam = self.layer_cams[2]
trans_periph = mono_trans if mono_trans != None else trans
perf = Perf(True, True) if perf_time else None
# *_rays_o, *_rays_d: (1, N, 3)
fovea_rays_o, fovea_rays_d = fovea_cam.get_global_rays(trans, True)
mid_rays_o, mid_rays_d = mid_cam.get_global_rays(trans_periph, True)
periph_rays_o, periph_rays_d = periph_cam.get_global_rays(
trans_periph, True)
mid_periph_rays_o = torch.cat([mid_rays_o, periph_rays_o], 1)
mid_periph_rays_d = torch.cat([mid_rays_d, periph_rays_d], 1)
if perf_time:
perf.checkpoint('Get rays')
perf1 = Perf(True, True) if perf_time else None
fovea_inferred, fovea_depth = self._infer(
self.fovea_net, fovea_rays_o, fovea_rays_d, [fovea_cam.res], True)
if perf_time:
perf1.checkpoint('Infer fovea')
mid_inferred, mid_depth, periph_inferred, periph_depth = self._infer(
self.periph_net, mid_periph_rays_o, mid_periph_rays_d,
[mid_cam.res, periph_cam.res], True)
if perf_time:
perf1.checkpoint('Infer mid & periph')
perf.checkpoint('Infer')
return self.render(*args, **kwds)
def render(self, view: Trans, gaze, right_gaze=None, *,
stereo_disparity=0, ret_raw=False) -> Union[Mapping[str, torch.Tensor], Tuple[Mapping[str, torch.Tensor]]]:
if stereo_disparity > TINY_FLOAT:
left_view = Trans(
view.trans_point(torch.tensor([-stereo_disparity / 2, 0, 0], device=view.device())),
view.r)
right_view = Trans(
view.trans_point(torch.tensor([stereo_disparity / 2, 0, 0], device=view.device())),
view.r)
left_gaze = gaze
right_gaze = gaze if right_gaze is None else right_gaze
res_raw_left = [
self._render(i, left_view, left_gaze if i < 2 else None)['color']
for i in range(3)
]
res_raw_right = [
self._render(i, right_view, right_gaze if i < 2 else None)['color']
for i in range(3)
]
return self._gen_output(res_raw_left, left_gaze, ret_raw), \
self._gen_output(res_raw_right, right_gaze, ret_raw)
else:
res_raw = [
self._render(i, view, gaze if i < 2 else None)['color']
for i in range(3)
]
return self._gen_output(res_raw, gaze, ret_raw)
'''
if mono_trans != None and shift == 0: # do warp
fovea_depth[torch.isnan(fovea_depth)] = 50
mid_depth[torch.isnan(mid_depth)] = 50
......@@ -100,8 +90,6 @@ class GenFinal(object):
z_list, mid_inferred, mid_depth)
periph_inferred = self._warp(trans, mono_trans, periph_cam,
z_list, periph_inferred, periph_depth)
if perf_time:
perf.checkpoint('Mono warp')
else:
p = torch.tensor([[0, 0, torch.mean(fovea_depth)]],
device=self.device)
......@@ -109,83 +97,75 @@ class GenFinal(object):
shift = self.full_cam.proj(
p_, center_as_origin=True)[..., 0].item()
shift = round(shift)
if perf_time:
perf.checkpoint('Mono shift')
fovea_refined = refine.grad_aware_median(fovea_inferred, 3, 3, True)
fovea_refined = refine.constrast_enhance(fovea_refined, 3, 0.2)
mid_refined = refine.constrast_enhance(mid_inferred, 5, 0.2)
periph_refined = refine.constrast_enhance(periph_inferred, 5, 0.2)
if perf_time:
perf.checkpoint('Refine')
blended = self.foveation.synthesis([
fovea_refined,
mid_refined,
periph_refined
], (gaze[0], gaze[1]), [0, shift, shift] if shift != 0 else None)
if perf_time:
perf.checkpoint('Blend')
if ret_raw:
return {
'fovea': fovea_refined,
'mid': mid_refined,
'periph': periph_refined,
'blended': blended,
'fovea_raw': fovea_inferred,
'mid_raw': mid_inferred,
'periph_raw': periph_inferred,
'blended_raw': self.foveation.synthesis([
fovea_inferred,
mid_inferred,
periph_inferred
], (gaze[0], gaze[1]))
'''
def _render(self, layer: int, view: Trans, gaze=None, ret_depth=False) -> Mapping[str, torch.Tensor]:
net = self.layers_net[layer]
cam = self.layers_cam[layer]
if gaze is not None:
cam = self._adjust_cam(cam, gaze)
rays_o, rays_d = cam.get_global_rays(view, True) # (1, N, 3)
if self.layers_mask is not None and layer < len(self.layers_mask):
mask = self.layers_mask[layer] >= 0
rays_o = rays_o[:, mask]
rays_d = rays_d[:, mask]
net_output = net(rays_o.view(-1, 3), rays_d.view(-1, 3), ret_depth=ret_depth)
ret = {
'color': torch.zeros(1, cam.res[0], cam.res[1], 3)
}
ret['color'][:, mask] = net_output['color']
ret['color'] = ret['color'].permute(0, 3, 1, 2)
if ret_depth:
ret['depth'] = torch.zeros(1, cam.res[0], cam.res[1])
ret['depth'][:, mask] = net_output['depth']
return ret
else:
net_output = net(rays_o.view(-1, 3), rays_d.view(-1, 3), ret_depth=ret_depth)
return {
'fovea': fovea_refined,
'mid': mid_refined,
'periph': periph_refined,
'blended': blended
'color': net_output['color'].view(1, cam.res[0], cam.res[1], -1).permute(0, 3, 1, 2),
'depth': net_output['depth'].view(1, cam.res[0], cam.res[1]) if ret_depth else None
}
def _infer(self, net, rays_o: torch.Tensor, rays_d: torch.Tensor,
res_list: List[Tuple[int, int]], ret_depth=False):
if ret_depth:
colors, depths = net(rays_o.view(-1, 3), rays_d.view(-1, 3),
ret_depth=True)
else:
colors = net(rays_o.view(-1, 3), rays_d.view(-1, 3))
depths = None
images = []
offset = 0
for res in res_list:
bound = offset + res[0] * res[1]
images.append(colors[offset:bound].view(
1, res[0], res[1], -1).permute(0, 3, 1, 2))
if ret_depth:
images.append(depths[offset:bound].view(
1, res[0], res[1]))
offset = bound
return tuple(images)
def _gen_output(self, layers_img: List[torch.Tensor], gaze: Tuple[float, float], ret_raw=False) -> Mapping[str, torch.Tensor]:
refined = self._post_process(layers_img)
blended = self.foveation.synthesis(refined, gaze)
ret = {
'layers_img': refined,
'blended': blended
}
if ret_raw:
ret['layers_raw'] = layers_img
ret['blended_raw'] = self.foveation.synthesis(layers_img, gaze)
return ret
def _post_process(self, layers_img: List[torch.Tensor]) -> List[torch.Tensor]:
return [
#grad_aware_median(constrast_enhance(layers_img[0], 3, 0.2), 3, 3, True),
constrast_enhance(layers_img[0], 3, 0.2),
constrast_enhance(layers_img[1], 5, 0.2),
constrast_enhance(layers_img[2], 5, 0.2)
]
def _adjust_cam(self, cam: view.CameraParam, full_cam: view.CameraParam,
gaze: Tuple[float, float]) -> view.CameraParam:
def _adjust_cam(self, layer_cam: CameraParam, gaze: Tuple[float, float]) -> CameraParam:
fovea_offset = (
(gaze[0]) / full_cam.f[0].item() * cam.f[0].item(),
(gaze[1]) / full_cam.f[1].item() * cam.f[1].item()
(gaze[0]) / self.cam.f[0].item() * layer_cam.f[0].item(),
(gaze[1]) / self.cam.f[1].item() * layer_cam.f[1].item()
)
return view.CameraParam({
'fx': cam.f[0].item(),
'fy': cam.f[1].item(),
'cx': cam.c[0].item() - fovea_offset[0],
'cy': cam.c[1].item() - fovea_offset[1]
}, cam.res, device=self.device)
def _warp(self, trans: view.Trans, trans0: view.Trans,
cam: view.CameraParam, z_list: torch.Tensor,
return CameraParam({
'fx': layer_cam.f[0].item(),
'fy': layer_cam.f[1].item(),
'cx': layer_cam.c[0].item() - fovea_offset[0],
'cy': layer_cam.c[1].item() - fovea_offset[1]
}, layer_cam.res, device=self.device)
def _warp(self, trans: Trans, trans0: Trans,
cam: CameraParam, z_list: torch.Tensor,
image: torch.Tensor, depthmap: torch.Tensor) -> torch.Tensor:
"""
Warp `image`, rendered from view `trans0`, to view `trans` using `depthmap`.
......
......@@ -9,12 +9,14 @@ from utils import misc
class Foveation(object):
def __init__(self, fov_list: List[float],
out_res: Tuple[int, int], *, device=None):
self.fov_list = fov_list
def __init__(self, layers_fov: List[float], layers_res: List[Tuple[float, float]],
out_res: Tuple[int, int], *, blend=0.6, device=None):
self.layers_fov = layers_fov
self.layers_res = layers_res
self.out_res = out_res
self.blend = blend
self.device = device
self.n_layers = len(self.fov_list)
self.n_layers = len(self.layers_fov)
self.eye_fovea_blend = [
self._gen_layer_blendmap(i)
for i in range(self.n_layers - 1)
......@@ -52,7 +54,8 @@ class Foveation(object):
grid = ((self.coords - c) / R)[None, ...]
if shifts != None:
grid = img.horizontal_shift(grid, shifts[i], -2)
blend = nn_f.grid_sample(self.eye_fovea_blend[i][None, None, ...], grid) # (1, 1, H:out, W:out)
# (1, 1, H:out, W:out)
blend = nn_f.grid_sample(self.eye_fovea_blend[i][None, None, ...], grid)
output.mul_(1 - blend).add_(nn_f.grid_sample(layers[i], grid) * blend)
return output
......@@ -63,10 +66,20 @@ class Foveation(object):
:param i: index of layer
:return: size of layer i in final image (in pixels)
"""
length_i = view.fov2length(self.fov_list[i])
length = view.fov2length(self.fov_list[-1])
k = length_i / length
return int(math.ceil(self.out_res[0] * k))
return self.get_source_layer_cover_size_in_target_layer(
self.layers_fov[i], self.layers_fov[-1], self.out_res[0])
def get_source_layer_cover_size_in_target_layer(self, source_fov, target_fov,
target_pixel_height) -> int:
"""
Get the size (in pixels) that a layer with `source_fov` covers when rendered
into a target layer with `target_fov` at `target_pixel_height` pixels high.
:param source_fov: field of view of the source layer
:param target_fov: field of view of the target layer
:param target_pixel_height: pixel height of the target layer
:return: covered size in the target layer (in pixels)
"""
source_physical_height = view.fov2length(source_fov)
target_physical_height = view.fov2length(target_fov)
return int(math.ceil(target_pixel_height * source_physical_height / target_physical_height))
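# Worked example (a sketch; it assumes view.fov2length(fov) returns the extent
# subtended at unit distance, e.g. 2 * tan(radians(fov) / 2)): a 20-degree fovea
# layer inside a 110-degree output rendered at 1600 px height covers about
# ceil(1600 * 2*tan(10 deg) / (2*tan(55 deg))) = ceil(1600 * 0.3527 / 2.8563) = 198 px.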
def _gen_layer_blendmap(self, i: int) -> torch.Tensor:
"""
......@@ -79,4 +92,29 @@ class Foveation(object):
R = size / 2
p = misc.meshgrid(size, size).to(device=self.device) # (size, size, 2)
r = torch.norm(p - R, dim=2) # (size, size)
return misc.smooth_step(R, R * 0.6, r)
return misc.smooth_step(R, R * self.blend, r)
def get_layers_mask(self) -> List[torch.Tensor]:
"""
Generate mask images for layers[:-1].
The meaning of the values in a mask image:
-1: skipped
0~1: blend with inner layer
1~2: only this layer
2~3: blend with outer layer
:return: mask images for every layer except the outermost
"""
layers_mask = []
for i in range(self.n_layers - 1):
layers_mask.append(torch.ones(*self.layers_res[i], device=self.device) * -1)
r = torch.norm(misc.meshgrid(*self.layers_res[i], normalize=True).to(device=self.device) * 2 - 1, dim=-1)
inner_radius = self.get_source_layer_cover_size_in_target_layer(
self.layers_fov[i - 1], self.layers_fov[i],
self.layers_res[i][0]) / self.layers_res[i][0] if i > 0 else 0
bounds = [inner_radius * (1 - self.blend), inner_radius, self.blend, 1]
for bi in range(len(bounds) - 1):
region = torch.logical_and(r > bounds[bi], r <= bounds[bi + 1])
layers_mask[i][region] = bi + \
(r[region] - bounds[bi]) / (bounds[bi + 1] - bounds[bi])
return layers_mask
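def _layers_mask_demo(foveation: 'Foveation', layer: int) -> torch.Tensor:
    # Illustrative sketch, not part of the original file: how a layer mask is
    # typically consumed (mirrors FoveatedNeuralRenderer._render). Pixels with
    # value -1 are skipped entirely; pixels with values in [0, 3] are rendered,
    # and the value later selects how they blend with neighboring layers.
    mask = foveation.get_layers_mask()[layer]  # (H, W)
    need_render = mask >= 0                    # boolean selector for rays/pixels
    return need_render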
\ No newline at end of file
import cv2
import torch
import numpy as np
import torch.nn.functional as nn_f
from utils import img
from utils.constants import *
def constrast_enhance(image, sigma, fe):
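# Local contrast enhancement: push each pixel away from its 3x3 box-filtered
# mean by a factor of 1 + sigma * fe, then clamp to [0, 1].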
kernel = torch.ones(1, 1, 3, 3, device=image.device) / 9
mean = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1)
cScale = 1.0 + sigma * fe
return torch.clamp(mean + (image - mean) * cScale, 0, 1)
def morph_close(image: torch.Tensor):
image_ = img.torch2np(image)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
image_ = cv2.morphologyEx(image_, cv2.MORPH_CLOSE, kernel)
return img.np2torch(image_).to(image.device)
def get_grad(image: torch.Tensor, k=1, do_morph_close=False):
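# Per-channel Sobel gradient magnitude, scaled by k and clamped to [0, 1];
# optionally apply a morphological close to fill small gaps in the edge map.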
kernel = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]],
device=image.device, dtype=torch.float32).view(1, 1, 3, 3)
x_grad = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1) / 4
kernel = torch.tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
device=image.device, dtype=torch.float32).view(1, 1, 3, 3)
y_grad = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1) / 4
grad = (x_grad ** 2 + y_grad ** 2).sqrt() * k
if do_morph_close:
grad = morph_close(grad)
return grad.clamp(0, 1)
def get_gaussian_kernel(ksize, sigma=0):
if sigma <= 0:
# compute the default sigma from the kernel size, consistent with OpenCV
sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
center = ksize // 2
xs = (np.arange(ksize, dtype=np.float32) - center)  # horizontal distance of each element from the kernel center
kernel1d = np.exp(-(xs ** 2) / (2 * sigma ** 2))  # 1-D Gaussian kernel
# exploit separability: build the 2-D kernel as the outer product of the 1-D kernel with itself
kernel = kernel1d[..., None] @ kernel1d[None, ...]
kernel = torch.from_numpy(kernel)
kernel = kernel / kernel.sum()  # normalize
return kernel.view(1, 1, ksize, ksize)
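def _check_gaussian_kernel(ksize: int = 5, sigma: float = 1.0):
    # Sanity-check sketch, not part of the original file: the separable
    # construction above should agree with the outer product of OpenCV's
    # 1-D Gaussian kernels for the same ksize/sigma.
    k1d = cv2.getGaussianKernel(ksize, sigma)                    # (ksize, 1), normalized
    k2d_cv = torch.from_numpy((k1d @ k1d.T).astype(np.float32))  # (ksize, ksize)
    k2d_ours = get_gaussian_kernel(ksize, sigma).view(ksize, ksize)
    assert torch.allclose(k2d_cv, k2d_ours, atol=1e-6)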
def grad_aware_median(image: torch.Tensor, median_kernel_size: int, grad_k: float,
grad_do_morph_close: bool):
image_ = img.torch2np(image)
blur = cv2.medianBlur(image_, median_kernel_size)
blur = img.np2torch(blur).to(image.device)
grad = get_grad(image, grad_k, grad_do_morph_close)
return image * grad + blur * (1 - grad)
def grad_aware_gaussian(image, ksize, sigma=0):
kernel = get_gaussian_kernel(ksize, sigma).to(image.device)
print(kernel.size())
blur = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1)
grad = get_grad(image)
return image * grad + blur * (1 - grad)
def bilateral_filter(batch_img, ksize, sigmaColor=None, sigmaSpace=None):
device = batch_img.device
if sigmaSpace is None:
sigmaSpace = 0.15 * ksize + 0.35 # 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
if sigmaColor is None:
sigmaColor = sigmaSpace
pad = (ksize - 1) // 2
batch_img_pad = nn_f.pad(
batch_img, pad=[pad, pad, pad, pad], mode='reflect')
# batch_img has shape B x C x H x W, so unfold along dims 2 and 3
# patches.shape: B x C x H x W x ksize x ksize
patches = batch_img_pad.unfold(2, ksize, 1).unfold(3, ksize, 1)
patch_dim = patches.dim()  # 6
# per-pixel intensity differences within each patch
diff_color = patches - batch_img.unsqueeze(-1).unsqueeze(-1)
# range (color) weights computed from the intensity differences
weights_color = torch.exp(-(diff_color ** 2) / (2 * sigmaColor ** 2))
# normalize the range weights
weights_color = weights_color / \
weights_color.sum(dim=(-1, -2), keepdim=True)
# get the spatial Gaussian kernel and expand it to the shape of weights_color
weights_space = get_gaussian_kernel(ksize, sigmaSpace).to(device)
weights_space_dim = (patch_dim - 2) * (1,) + (ksize, ksize)
weights_space = weights_space.view(
*weights_space_dim).expand_as(weights_color)
# multiply the two weight matrices to get the overall weights
weights = weights_space * weights_color
# normalization factor of the overall weights
weights_sum = weights.sum(dim=(-1, -2))
# weighted average over each patch
weighted_pix = (weights * patches).sum(dim=(-1, -2)) / weights_sum
return weighted_pix
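def _bilateral_filter_demo():
    # Minimal usage sketch, not part of the original file: edge-preserving
    # smoothing of a (hypothetical) random batch of RGB images in [0, 1].
    batch_img = torch.rand(2, 3, 64, 64)             # (B, C, H, W)
    smoothed = bilateral_filter(batch_img, ksize=5)  # same shape as input
    return smoothed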
import cv2
import torch
import numpy as np
import torch.nn.functional as nn_f
from utils import view
from utils import img
from utils.constants import *
class GuideRefinement(object):
def __init__(self, guides_image, guides_view: view.Trans,
......@@ -48,115 +46,3 @@ class GuideRefinement(object):
print(warp.size(), warped_diff.size())
avg_diff = torch.mean(warped_diff, 0)
return image * (1 + avg_diff)
def constrast_enhance(image, sigma, fe):
kernel = torch.ones(1, 1, 3, 3, device=image.device) / 9
mean = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1)
cScale = 1.0 + sigma * fe
return torch.clamp(mean + (image - mean) * cScale, 0, 1)
def morph_close(image: torch.Tensor):
image_ = img.torch2np(image)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
image_ = cv2.morphologyEx(image_, cv2.MORPH_CLOSE, kernel)
return img.np2torch(image_).to(image.device)
def get_grad(image: torch.Tensor, k=1, do_morph_close=False):
kernel = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]],
device=image.device, dtype=torch.float32).view(1, 1, 3, 3)
x_grad = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1) / 4
kernel = torch.tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
device=image.device, dtype=torch.float32).view(1, 1, 3, 3)
y_grad = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1) / 4
grad = (x_grad ** 2 + y_grad ** 2).sqrt() * k
if do_morph_close:
grad = morph_close(grad)
return grad.clamp(0, 1)
def getGaussianKernel(ksize, sigma=0):
if sigma <= 0:
# compute the default sigma from the kernel size, consistent with OpenCV
sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
center = ksize // 2
xs = (np.arange(ksize, dtype=np.float32) - center)  # horizontal distance of each element from the kernel center
kernel1d = np.exp(-(xs ** 2) / (2 * sigma ** 2))  # 1-D Gaussian kernel
# exploit separability: build the 2-D kernel as the outer product of the 1-D kernel with itself
kernel = kernel1d[..., None] @ kernel1d[None, ...]
kernel = torch.from_numpy(kernel)
kernel = kernel / kernel.sum()  # normalize
return kernel.view(1, 1, ksize, ksize)
def grad_aware_median(image: torch.Tensor, median_kernel_size: int, grad_k: float,
grad_do_morph_close: bool):
image_ = img.torch2np(image)
blur = cv2.medianBlur(image_, median_kernel_size)
blur = img.np2torch(blur).to(image.device)
grad = get_grad(image, grad_k, grad_do_morph_close)
return image * grad + blur * (1 - grad)
def grad_aware_gaussian(image, ksize, sigma=0):
kernel = getGaussianKernel(ksize, sigma).to(image.device)
print(kernel.size())
blur = torch.cat([
nn_f.conv2d(image[:, 0:1], kernel, padding=1),
nn_f.conv2d(image[:, 1:2], kernel, padding=1),
nn_f.conv2d(image[:, 2:3], kernel, padding=1)
], 1)
grad = get_grad(image)
return image * grad + blur * (1 - grad)
def bilateral_filter(batch_img, ksize, sigmaColor=None, sigmaSpace=None):
device = batch_img.device
if sigmaSpace is None:
sigmaSpace = 0.15 * ksize + 0.35 # 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
if sigmaColor is None:
sigmaColor = sigmaSpace
pad = (ksize - 1) // 2
batch_img_pad = nn_f.pad(
batch_img, pad=[pad, pad, pad, pad], mode='reflect')
# batch_img has shape B x C x H x W, so unfold along dims 2 and 3
# patches.shape: B x C x H x W x ksize x ksize
patches = batch_img_pad.unfold(2, ksize, 1).unfold(3, ksize, 1)
patch_dim = patches.dim()  # 6
# per-pixel intensity differences within each patch
diff_color = patches - batch_img.unsqueeze(-1).unsqueeze(-1)
# range (color) weights computed from the intensity differences
weights_color = torch.exp(-(diff_color ** 2) / (2 * sigmaColor ** 2))
# normalize the range weights
weights_color = weights_color / \
weights_color.sum(dim=(-1, -2), keepdim=True)
# get the spatial Gaussian kernel and expand it to the shape of weights_color
weights_space = getGaussianKernel(ksize, sigmaSpace).to(device)
weights_space_dim = (patch_dim - 2) * (1,) + (ksize, ksize)
weights_space = weights_space.view(
*weights_space_dim).expand_as(weights_color)
# multiply the two weight matrices to get the overall weights
weights = weights_space * weights_color
# normalization factor of the overall weights
weights_sum = weights.sum(dim=(-1, -2))
# weighted average over each patch
weighted_pix = (weights * patches).sum(dim=(-1, -2)) / weights_sum
return weighted_pix
def update_config(config):
# Net parameters
config.NET_TYPE = 'snerffast4'
config.N_ENCODE_DIM = 10
config.N_ENCODE_DIM = 6
#config.N_DIR_ENCODE = 4
config.FC_PARAMS.update({
'nf': 256,
'n_layers': 8
'nf': 512,
'n_layers': 4
})
config.SAMPLE_PARAMS.update({
'depth_range': (0.7, 10),
'depth_range': (1, 30),
'n_samples': 64,
'perturb_sample': False
})
def update_config(config):
# Net parameters
config.NET_TYPE = 'snerffast4'
config.N_ENCODE_DIM = 6
#config.N_DIR_ENCODE = 4
config.FC_PARAMS.update({
'nf': 256,
'n_layers': 8
})
config.SAMPLE_PARAMS.update({
'depth_range': (1, 50),
'n_samples': 64,
'perturb_sample': False
})
......@@ -5,10 +5,10 @@ def update_config(config):
#config.N_DIR_ENCODE = 4
config.FC_PARAMS.update({
'nf': 256,
'n_layers': 8
'n_layers': 4
})
config.SAMPLE_PARAMS.update({
'depth_range': (1, 50),
'depth_range': (2, 50),
'n_samples': 64,
'perturb_sample': False
})
def update_config(config):
# Net parameters
config.NET_TYPE = 'snerffast4'
config.N_ENCODE_DIM = 6
#config.N_DIR_ENCODE = 4
config.FC_PARAMS.update({
'nf': 256,
'n_layers': 4
})
config.SAMPLE_PARAMS.update({
'depth_range': (1.2, 6),
'n_samples': 64,
'perturb_sample': False
})
def update_config(config):
# Net parameters
config.NET_TYPE = 'snerffastnew'
config.N_ENCODE_DIM = 6
#config.N_DIR_ENCODE = 4
config.FC_PARAMS.update({
'nf': 256,
'n_layers': 8
})
config.SAMPLE_PARAMS.update({
'depth_range': (0.3, 7),
'n_samples': 64,
'perturb_sample': False
})
......@@ -7,6 +7,7 @@ from nets.msl_net_new import NewMslNet
from nets.msl_ray import MslRay
from nets.msl_fast import MslFast
from nets.snerf_fast import SnerfFast
from nets.snerf_fast_new import SnerfFastNew
from nets.cnerf_v3 import CNerf
from nets.nerf import CascadeNerf
from nets.nerf import CascadeNerf2
......@@ -281,6 +282,13 @@ class SphericalViewSynConfig(object):
coord_encode=self.N_ENCODE_DIM,
dir_encode=self.N_DIR_ENCODE,
multiple_net=False)
if self.NET_TYPE.startswith('snerffastnew'):
return SnerfFastNew(fc_params=self.FC_PARAMS,
sampler_params=self.SAMPLE_PARAMS,
n_parts=int(self.NET_TYPE[12:] if len(self.NET_TYPE) > 12 else 1),
normalize_coord=self.NORMALIZE,
c=self.COLOR,
coord_encode=self.N_ENCODE_DIM)
if self.NET_TYPE.startswith('snerffast'):
return SnerfFast(fc_params=self.FC_PARAMS,
sampler_params=self.SAMPLE_PARAMS,
......
......@@ -98,10 +98,6 @@ class SphericalViewSynDataset(object):
disp_val = (1 - input[..., 0, :, :]) * (disp_range[1] - disp_range[0]) + disp_range[0]
return torch.reciprocal(disp_val)
def _euler_to_matrix(self, euler):
q = glm.quat(glm.radians(glm.vec3(euler[0], euler[1], euler[2])))
return glm.transpose(glm.mat3_cast(q)).to_list()
def _load_desc(self, path, res=None):
with open(path, 'r', encoding='utf-8') as file:
data_desc = json.loads(file.read())
......@@ -128,7 +124,7 @@ class SphericalViewSynDataset(object):
self.view_centers = torch.tensor(
data_desc['view_centers'], device=device.default()) # (N, 3)
self.view_rots = torch.tensor(
[self._euler_to_matrix([rot[1], rot[0], 0]) for rot in data_desc['view_rots']]
[view.euler_to_matrix([rot[1], rot[0], 0]) for rot in data_desc['view_rots']]
if len(data_desc['view_rots'][0]) == 2 else data_desc['view_rots'],
device=device.default()).view(-1, 3, 3) # (N, 3, 3)
#self.view_centers = self.view_centers[:6]
......@@ -139,7 +135,7 @@ class SphericalViewSynDataset(object):
if 'gl_coord' in data_desc and data_desc['gl_coord'] == True:
print('Convert from OGL coordinate to DX coordinate (i. e. flip z axis)')
if not data_desc['cam_params'].get('normalized'):
if not data_desc['cam_params'].get('fov'):
self.cam_params.f[1] *= -1
self.view_centers[:, 2] *= -1
self.view_rots[:, 2] *= -1
......
......@@ -72,7 +72,7 @@ out_desc_name = args.output
out_desc = dataset_desc.copy()
out_desc['view_file_pattern'] = f"{out_desc_name}/{dataset_desc['view_file_pattern'].split('/')[-1]}"
views = []
for idx in product([0, 2, 4], [0, 2, 4], [0, 2, 4], list(range(9)), [1]):#, [0, 2, 3, 5], [1, 2, 3, 4]):
for idx in product([1, 4], [1, 4], [1, 4], [3, 4, 5]):#, [0, 2, 3, 5], [1, 2, 3, 4]):
views += indices[idx].flatten().tolist()
out_desc['samples'] = [len(views)]
out_desc['views'] = views
......
......@@ -142,6 +142,21 @@ def render_rays(ray_batch,
weights = alpha * \
tf.math.cumprod(1.-alpha + 1e-10, axis=-1, exclusive=True)
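# Standard NeRF compositing weights: w_i = alpha_i * prod_{j<i}(1 - alpha_j),
# i.e. the contribution of sample i given the accumulated transmittance so far.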
try:
tf.debugging.check_numerics(alpha, 'alpha')
except Exception as err:
print('alpha check failed')
try:
tf.debugging.check_numerics(dists, 'dists')
except Exception as err:
print('dists check failed')
try:
tf.debugging.check_numerics(tf.linalg.norm(rays_d[..., None, :], axis=-1), 'rays_d norm')
except Exception as err:
print('rays_d norm check failed')
print(rays_d.eval())
# Computed weighted color of each sample along each ray.
rgb_map = tf.reduce_sum(
weights[..., None] * rgb, axis=-2) # [N_rays, 3]
......
import math
import torch
import torch.nn as nn
from torch.nn.modules import module
from .modules import *
from utils import sphere
from utils import color
class SnerfFastNew(nn.Module):
def __init__(self, fc_params, sampler_params, n_parts: int, normalize_coord: bool,
c: int = color.RGB, coord_encode: int = 0):
"""
Initialize a multi-sphere-layer net.
:param fc_params: parameters of the fully-connected networks
:param sampler_params: parameters of the sampler
:param n_parts: number of sub-networks the samples are split across
:param normalize_coord: whether to normalize the spherical coords to [0, 2pi] before encoding
:param c: color mode
:param coord_encode: number of dimensions to encode the input coords to
"""
super().__init__()
self.color = c
self.normalize_coord = normalize_coord
self.n_samples = sampler_params['n_samples']
self.n_parts = n_parts
self.samples_per_part = self.n_samples // self.n_parts
self.coord_chns = 3
self.color_chns = color.chns(self.color)
self.coord_encoder = InputEncoder.Get(coord_encode, self.coord_chns)
self.nets = nn.ModuleList([
FcNet(in_chns=self.coord_encoder.out_dim * self.samples_per_part,
out_chns=0, nf=fc_params['nf'], n_layers=fc_params['n_layers'],
activation=fc_params['activation'])
for _ in range(self.n_parts)
])
self.decoder = FcNet(in_chns=fc_params['nf'] + self.coord_encoder.out_dim,
out_chns=self.color_chns + 1, nf=128, n_layers=2,
activation=fc_params['activation'])
self.sampler = Sampler(**sampler_params)
self.rendering = NewRendering()
def forward(self, rays_o: torch.Tensor, rays_d: torch.Tensor,
ret_depth=False, debug=False) -> torch.Tensor:
"""
rays -> colors
:param rays_o `Tensor(B, 3)`: rays' origin
:param rays_d `Tensor(B, 3)`: rays' direction
:return: `Tensor(B, C)`, inferred images/pixels
"""
coords, pts, depths = self.sampler(rays_o, rays_d)
coords_encoded = self.coord_encoder(coords[..., -self.coord_chns:])
densities = torch.empty(rays_o.size(0), self.n_samples, device=device.default())
colors = torch.empty(rays_o.size(0), self.n_samples, self.color_chns,
device=device.default())
for i, net in enumerate(self.nets):
s = slice(i * self.samples_per_part, (i + 1) * self.samples_per_part)
feature_out = net(coords_encoded[:, s].flatten(1, 2))
for si in range(i * self.samples_per_part, (i + 1) * self.samples_per_part):
decoder_out = self.decoder(torch.cat([feature_out, coords_encoded[:, si]], dim=-1))
colors[:, si] = decoder_out[..., :-1]
densities[:, si] = decoder_out[..., -1]
return self.rendering(colors.view(-1, self.n_samples, self.color_chns),
densities, depths, ret_depth=ret_depth, debug=debug)
......@@ -14,7 +14,7 @@
"\n",
"from utils import img\n",
"\n",
"os.chdir('/home/dengnc/deep_view_syn/data/__0_user_study/gas_fovea_r50x50_t0.3')\n",
"os.chdir('../data/__0_user_study/gas_fovea_r50x50_t0.3')\n",
"compares = {\n",
" 'Ground truth': 'train/view_%04d.png',\n",
" 'Old (128x4:32:1-50m)': 'us_fovea@nmsl-rgb_e10_fc128x4_d1-50_s32/output/model-epoch_200/train/out_%04d.png',\n",
......@@ -51,7 +51,7 @@
"\n",
"from utils import img\n",
"\n",
"os.chdir('/home/dengnc/deep_view_syn/data/__0_user_study/gas_fovea_r50x50_t0.3')\n",
"os.chdir('../data/__0_user_study/gas_fovea_r50x50_t0.3')\n",
"compares = {\n",
" 'Ground truth': 'train/view_%04d.png',\n",
" 'Encode 4': 'mslray@mslray-rgb_e10_fc256x8_d0.50-5.00_s128_~p/output/model-epoch_50/train/out_%04d.png',\n",
......@@ -81,7 +81,7 @@
"import compare\n",
"\n",
"\n",
"os.chdir('/home/dengnc/deep_view_syn/data/classroom_fovea_r40x40_t0.3')\n",
"os.chdir('../data/classroom_fovea_r40x40_t0.3')\n",
"dataset = 'test'\n",
"compares = {\n",
" 'Old': ['fovea@msl-rgb_e10_fc128x4_d0.50-5.00_s128', 80, 'out_%04d.png'],\n",
......@@ -112,7 +112,7 @@
"\n",
"from utils import img\n",
"\n",
"os.chdir('/home/dengnc/deep_view_syn/data/study_fovea_2021.01.23')\n",
"os.chdir('../data/study_fovea_2021.01.23')\n",
"compares = {\n",
" 'Ground truth': 'train/view_%04d.png',\n",
" 'Old (128x4:32:1-5m)': 'us_fovea@nmsl-rgb_e10_fc128x4_d1.00-5.00_s32/output/model-epoch_300/train/out_view_%04d.png',\n",
......@@ -147,7 +147,7 @@
"\n",
"from utils import img\n",
"\n",
"os.chdir('/home/dengnc/deep_view_syn/data/gas_fovea_r90x30_t0.9_2021.01.25')\n",
"os.chdir('../data/gas_fovea_r90x30_t0.9_2021.01.25')\n",
"compares = {\n",
" 'Ground truth': 'train/view_%04d.png',\n",
" 'Old (128x4:32:1-50m)': 'fovea_rgb@msl-rgb_e10_fc128x4_d1.00-50.00_s32/output/model-epoch_50/train/out_%04d.png',\n",
......
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Set CUDA:0 as current device.\n"
]
}
],
"source": [
"import sys\n",
"import os\n",
"import torch\n",
"import torch.nn as nn\n",
"import matplotlib.pyplot as plt\n",
"\n",
"rootdir = os.path.abspath(sys.path[0] + '/../')\n",
"sys.path.append(rootdir)\n",
"torch.cuda.set_device(0)\n",
"print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
"torch.autograd.set_grad_enabled(False)\n",
"\n",
"from data.spherical_view_syn import *\n",
"from configs.spherical_view_syn import SphericalViewSynConfig\n",
"from utils import netio\n",
"from utils import img\n",
"from utils import device\n",
"from utils.view import *\n",
"from components.fnr import FoveatedNeuralRenderer\n",
"\n",
"\n",
"def load_net(path):\n",
" config = SphericalViewSynConfig()\n",
" config.from_id(os.path.splitext(path)[0])\n",
" config.SAMPLE_PARAMS['perturb_sample'] = False\n",
" net = config.create_net().to(device.default())\n",
" netio.load(path, net)\n",
" return net\n",
"\n",
"\n",
"def find_file(prefix):\n",
" for path in os.listdir():\n",
" if path.startswith(prefix):\n",
" return path\n",
" return None\n",
"\n",
"\n",
"def load_views(data_desc_file) -> Trans:\n",
" with open(data_desc_file, 'r', encoding='utf-8') as file:\n",
" data_desc = json.loads(file.read())\n",
" view_centers = torch.tensor(\n",
" data_desc['view_centers'], device=device.default()).view(-1, 3)\n",
" view_rots = torch.tensor(\n",
" data_desc['view_rots'], device=device.default()).view(-1, 3, 3)\n",
" return Trans(view_centers, view_rots)\n",
"\n",
"\n",
"def plot_images(images):\n",
" plt.figure(figsize=(12, 4))\n",
" plt.subplot(131)\n",
" img.plot(images['layers_img'][0])\n",
" plt.subplot(132)\n",
" img.plot(images['layers_img'][1])\n",
" plt.subplot(133)\n",
" img.plot(images['layers_img'][2])\n",
" plt.figure(figsize=(12, 12))\n",
" img.plot(images['blended'])\n",
"\n",
"\n",
"scenes = {\n",
" 'classroom': 'classroom_all',\n",
" 'stones': 'stones_all',\n",
" 'barbershop': 'barbershop_all',\n",
" 'lobby': 'lobby_all'\n",
"}\n",
"\n",
"fov_list = [20, 45, 110]\n",
"res_list = [(256, 256), (256, 256), (256, 230)]\n",
"res_full = (1600, 1440)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Change working directory to /home/dengnc/dvs/data/__new/lobby_all\n",
"Load net from fovea@snerffast4-rgb_e6_fc512x4_d2.00-50.00_s64_~p.pth ...\n",
"Load net from periph@snerffast4-rgb_e6_fc256x4_d2.00-50.00_s64_~p.pth ...\n"
]
}
],
"source": [
"scene = 'lobby'\n",
"os.chdir(f'{rootdir}/data/__new/{scenes[scene]}')\n",
"print('Change working directory to ', os.getcwd())\n",
"\n",
"fovea_net = load_net(find_file('fovea'))\n",
"periph_net = load_net(find_file('periph'))\n",
"renderer = FoveatedNeuralRenderer(fov_list, res_list, nn.ModuleList([fovea_net, periph_net, periph_net]),\n",
" res_full, device=device.default())\n"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/dengnc/miniconda3/lib/python3.8/site-packages/torch/nn/functional.py:3828: UserWarning: Default grid_sample and affine_grid behavior has changed to align_corners=False since 1.3.0. Please specify align_corners=True if the old behavior is desired. See the documentation of grid_sample for details.\n",
" warnings.warn(\n"
]
}
],
"source": [
"params = {\n",
" 'classroom': [\n",
" [0, 0, 0, 0, 0, 0, 0],\n",
" [0, 0, 0, -53, 0, 0, 0],\n",
" [0, 0, 0, 20, -20, 0, 0]\n",
" ],\n",
" 'stones': [\n",
" [0, 0, 0, 0, 10, -300, -50],\n",
" [0, 0, 0, 0, 10, 200, -50]\n",
" ],\n",
" 'barbershop': [\n",
" [0, 0, 0, 20, 0, -300, 50],\n",
" [0, 0, 0, -140, -30, 150, -250],\n",
" [0, 0, 0, -60, -30, 75, -125],\n",
" ],\n",
" 'lobby': [\n",
" #[0, 0, 0, 0, 0, 75, 0],\n",
" #[0, 0, 0, 0, 0, 5, 150],\n",
" [0, 0, 0, -120, 0, 75, 50],\n",
" ]\n",
"}\n",
"\n",
"for i, param in enumerate(params[scene]):\n",
" view = Trans(torch.tensor(param[:3], device=device.default()),\n",
" torch.tensor(euler_to_matrix([-param[4], param[3], 0]), device=device.default()).view(3, 3))\n",
" images = renderer(view, param[-2:])\n",
" if False:\n",
" outputdir = '../__demo/mono/'\n",
" misc.create_dir(outputdir)\n",
" img.save(images['layers_img'][0], f'{outputdir}{scene}_{i}_fovea.png')\n",
" img.save(images['layers_img'][1], f'{outputdir}{scene}_{i}_mid.png')\n",
" img.save(images['layers_img'][2], f'{outputdir}{scene}_{i}_periph.png')\n",
" img.save(images['blended'], f'{outputdir}{scene}_{i}_blended.png')\n",
" else:\n",
" images = plot_images(images)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# Load Dataset\n",
"views = load_views('train.json')\n",
"print('Dataset loaded.')\n",
"print('views:', views.size())\n",
"for view_idx in range(views.size()[0]):\n",
" center = (0, 0)\n",
" test_view = views.get(view_idx)\n",
" render_view(test_view, center)\n",
" '''\n",
" images = gen(center, test_view)\n",
" outputdir = '../__2_demo/layer_blend/'\n",
" misc.create_dir(outputdir)\n",
" for key in images:\n",
" img.save(images[key], outputdir + '%s_view%04d_%s.png' % (scene, view_idx, key))\n",
" '''\n",
" '''\n",
" images = gen(\n",
" center, test_view,\n",
" mono_trans=Trans(test_view.trans_point(\n",
" torch.tensor([0.03, 0, 0], device=device.default())\n",
" ), test_view.r))\n",
" outputdir = '../__2_demo/output_mono/ref_as_right_eye/'\n",
" misc.create_dir(outputdir)\n",
" for key in images:\n",
" key = 'blended'\n",
" img.save(images[key], outputdir + '%s_view%04d_%s.png' % (scene, view_idx, key))\n",
" '''\n",
" '''\n",
" left_images = gen(center,\n",
" Trans(\n",
" test_view.trans_point(\n",
" torch.tensor([-0.03, 0, 0], device=device.default())\n",
" ),\n",
" test_view.r),\n",
" mono_trans=test_view)\n",
" right_images = gen(center, Trans(\n",
" test_view.trans_point(\n",
" torch.tensor([0.03, 0, 0], device=device.default())\n",
" ), test_view.r), mono_trans=test_view)\n",
" outputdir = '../__2_demo/mono_periph/stereo/'\n",
" misc.create_dir(outputdir)\n",
" key = 'blended'\n",
" img.save(left_images[key], outputdir + '%s_view%04d_%s_l.png' % (scene, view_idx, key))\n",
" img.save(right_images[key], outputdir + '%s_view%04d_%s_r.png' % (scene, view_idx, key))\n",
" '''\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.8.5 64-bit ('base': conda)",
"name": "python385jvsc74a57bd082066b63b621a9e3d15e3b7c11ca76da6238eff3834294910d715044bd0561e5"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
},
"metadata": {
"interpreter": {
"hash": "82066b63b621a9e3d15e3b7c11ca76da6238eff3834294910d715044bd0561e5"
}
},
"orig_nbformat": 2
},
"nbformat": 4,
"nbformat_minor": 2
}
\ No newline at end of file
......@@ -2,24 +2,17 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Set CUDA:2 as current device.\n"
]
}
],
"outputs": [],
"source": [
"import sys\n",
"import os\n",
"import torch\n",
"import matplotlib.pyplot as plt\n",
"\n",
"sys.path.append(os.path.abspath(sys.path[0] + '/../'))\n",
"rootdir = os.path.abspath(sys.path[0] + '/../')\n",
"sys.path.append(rootdir)\n",
"\n",
"torch.cuda.set_device(2)\n",
"print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
......@@ -32,7 +25,7 @@
"from utils import img\n",
"from utils import device\n",
"from utils import view\n",
"from components.gen_final import GenFinal\n",
"from components.fnr import FoveatedNeuralRenderer\n",
"\n",
"\n",
"def load_net(path):\n",
......@@ -111,21 +104,9 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Change working directory to /home/dengnc/deep_view_syn/data/bedroom_all_in_one\n",
"Load net from fovea@nmsl-rgb_e10_fc256x4_d1.00-50.00_s32.pth ...\n",
"Load net from periph@nnmsl-rgb_e10_fc64x4_d1.00-50.00_s16.pth ...\n",
"Dataset loaded.\n",
"views: [3]\n"
]
}
],
"outputs": [],
"source": [
"centers = {\n",
" 'gas': [\n",
......@@ -147,7 +128,7 @@
" ]\n",
"}\n",
"scene = 'bedroom'\n",
"os.chdir(sys.path[0] + '/../data/' + scenes[scene])\n",
"os.chdir(os.path.join(rootdir, f'data/{scenes[scene]}'))\n",
"print('Change working directory to ', os.getcwd())\n",
"\n",
"fovea_net = load_net(find_file('fovea'))\n",
......@@ -172,7 +153,7 @@
" ), test_view.r), mono_trans=test_view)\n",
" #plot_fovea(left_images, right_images, centers[scene][view_idx][0],\n",
" # centers[scene][view_idx][1])\n",
" outputdir = '/home/dengnc/deep_view_syn/data/__2_demo/mono_periph/stereo/'\n",
" outputdir = '../__2_demo/mono_periph/stereo/'\n",
" misc.create_dir(outputdir)\n",
" # for key in images:\n",
" key = 'blended'\n",
......@@ -191,7 +172,7 @@
" ), test_view.r))\n",
" #plot_fovea(left_images, right_images, centers[scene][view_idx][0],\n",
" # centers[scene][view_idx][1])\n",
" outputdir = '/home/dengnc/deep_view_syn/data/__2_demo/stereo/'\n",
" outputdir = '../__2_demo/stereo/'\n",
" misc.create_dir(outputdir)\n",
" # for key in images:\n",
" key = 'blended'\n",
......