Commit 6294701e authored by Nianchen Deng

sync

parent 2824f796
@@ -108,6 +108,6 @@ output/
outputE/
# Data
data/
/data
bin/
\ No newline at end of file
@@ -23,18 +23,18 @@
"request": "launch",
"program": "train.py",
"args": [
//"-c",
//"snerf_voxels+ls+f32",
"/data1/dnc/dvs/data/__nerf/room/_nets/train/snerf_voxels+ls+f32/checkpoint_1.tar",
"--prune",
"1",
"--split",
"1",
"-e",
"100",
"--views",
"5",
//"data/__nerf/room/train.json"
"-c",
"_lr_snerf_voxels+ls",
//"/home/dengnc/dvs/data/classroom/_nets/pano_t0.8/smnerf_voxels+ls+lbl/checkpoint_35.tar",
//"--prune",
//"1",
//"--split",
//"1",
//"-e",
//"100",
//"--views",
//"5",
"data/classroom/lr_view_t0.8_r360x80_train"
],
"justMyCode": false,
"console": "integratedTerminal"
@@ -46,13 +46,13 @@
"program": "test.py",
"args": [
"-m",
"/home/dengnc/dvs/data/__new/barbershop_fovea_r360x80_t0.6/_nets/train_t0.3/snerfadv_voxels+ls2/checkpoint_50.tar",
"/home/dengnc/dvs/data/classroom/_nets/ms_train_t0.8/_cnerf/checkpoint_50.tar",
"-o",
"perf",
"color",
"--output-type",
"image",
"/home/dengnc/dvs/data/__new/barbershop_fovea_r360x80_t0.6/test_t0.3.json",
"/home/dengnc/dvs/data/classroom/lr_view_t0.8_r360x80_test.json",
"--views",
"1"
],
......
@@ -40,7 +40,7 @@ $ ffmpeg -y -r 50 -i %04d.png -c:v libx264 -vframes 600 ../classroom_hmd_mono_hi
## 2. Convert ONNX to TensorRT
```
$ trtexec --onnx=net@256x256x2.onnx --fp16 --saveEngine=net@256x256x2.trt --workspace=4096
$ trtexec --onnx=in.onnx --fp16 --saveEngine=out.trt --workspace=4096
```
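To sanity-check the generated engine from Python, you can deserialize it with the TensorRT runtime. A minimal sketch, assuming the TensorRT Python bindings are installed and the engine was saved as `out.trt` per the command above:
```
import tensorrt as trt

# Hypothetical check: deserialize the engine produced by trtexec above.
logger = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(logger)
with open("out.trt", "rb") as f:    # file name follows --saveEngine above
    engine = runtime.deserialize_cuda_engine(f.read())
assert engine is not None, "engine failed to deserialize"
```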
# Install FFmpeg with Extra Codecs:
......
#!/bin/bash
# Usage: <this script> <test_dataset.json> <model_dir>
# Runs test.py on the checkpoint_50.tar of every model folder under <model_dir>,
# then lists the generated perf outputs.
test_dataset=$1
test_model_dir=$2
for i in "$test_model_dir"/*
do
    python test.py -m "$(pwd)/$i/checkpoint_50.tar" -o perf color --output-type image "$test_dataset"
done
echo "Test Finished"
# Note: the awk field numbers assume a fixed directory depth of the model paths.
ls "$test_model_dir"/*/output_50/perf* | awk -F"/" '{print $6, "\t", $8}'
\ No newline at end of file
import bpy
import math
import json
import os
......
@@ -21,7 +21,7 @@ import torch.nn as nn
import sys
import numpy as np
from utils.geometry import discretize_points
from utils.constants import HUGE_FLOAT
from utils import math
try:
import builtins
@@ -381,7 +381,7 @@ def inverse_cdf_sampling(pts_idx: torch.Tensor, min_depth: torch.Tensor, max_dep
def _parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, deterministic=False):
# uniform sampling
_min_depth = min_depth.min(1)[0]
_max_depth = max_depth.masked_fill(max_depth.eq(HUGE_FLOAT), 0).max(1)[0]
_max_depth = max_depth.masked_fill(max_depth.eq(math.huge), 0).max(1)[0]
max_ray_length = (_max_depth - _min_depth).max()
delta = torch.arange(int(max_ray_length / MARCH_SIZE),
@@ -413,9 +413,9 @@ def _parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, determinis
sampled_depth.masked_fill_(
(max_ids.ne(min_ids)) |
(sampled_depth > _max_depth[:, None]) |
(sampled_dists == 0.0), HUGE_FLOAT)
(sampled_dists == 0.0), math.huge)
sampled_depth, ordered_index = sampled_depth.sort(-1) # sort again
sampled_masks = sampled_depth.eq(HUGE_FLOAT)
sampled_masks = sampled_depth.eq(math.huge)
num_max_steps = (~sampled_masks).sum(-1).max()
sampled_depth = sampled_depth[:, :num_max_steps]
@@ -456,7 +456,7 @@ def parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, determinist
return xt
sampled_idx = padding_points(sampled_idx, -1)
sampled_depth = padding_points(sampled_depth, HUGE_FLOAT)
sampled_depth = padding_points(sampled_depth, math.huge)
sampled_dists = padding_points(sampled_dists, 0.0)
return sampled_idx, sampled_depth, sampled_dists
......
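For context, this commit replaces the `HUGE_FLOAT` / `TINY_FLOAT` constants from `utils.constants` with attributes of `utils.math`. A minimal sketch of what that module is assumed to expose, inferred only from the usages in this diff (the actual values and contents are not shown here):
```
# utils/math.py -- hypothetical sketch; only the names used in this commit are shown.
from math import *   # assumed to re-export the standard-library math functions

huge = 1e10          # "infinite" depth sentinel, replacing HUGE_FLOAT (value assumed)
tiny = 1e-6          # epsilon guard against division by zero, replacing TINY_FLOAT (value assumed)
```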
@@ -3,7 +3,7 @@ import torch.nn.functional as nn_f
from typing import Any, List, Mapping, Tuple
from torch import nn
from utils.view import *
from utils.constants import *
from utils import math
from .post_process import *
from .foveation import Foveation
@@ -52,7 +52,7 @@ class FoveatedNeuralRenderer(object):
using_mask=True,
mono_periph_mode=0,
ret_raw=False) -> Union[Mapping[str, torch.Tensor], Tuple[Mapping[str, torch.Tensor]]]:
if stereo_disparity > TINY_FLOAT:
if stereo_disparity > math.tiny:
left_view = Trans(
view.trans_point(torch.tensor([-stereo_disparity / 2, 0, 0], device=self.device)),
view.r)
......
import math
import torch
import torch.nn.functional as nn_f
from typing import List, Tuple
from utils import img
from utils import view
from utils import misc
from utils import math
class Foveation(object):
......
@@ -3,7 +3,6 @@ import torch
import numpy as np
import torch.nn.functional as nn_f
from utils import img
from utils.constants import *
def constrast_enhance(image, sigma, fe):
......
import torch
import torch.nn.functional as nn_f
from utils import view
from utils.constants import *
from utils import math
class GuideRefinement(object):
@@ -15,7 +15,7 @@ class GuideRefinement(object):
for i in range(guides_image.size(0))
], 0)
self.guides_diff = (guides_image - guides_inferred) / \
(guides_inferred + TINY_FLOAT)
(guides_inferred + math.tiny)
self.guides_view = guides_view
self.guides_cam = guides_cam
......
{
"model": "CNeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"space": "voxels",
"steps": [4, 16, 8],
"interp_on_coarse": false,
"sub_models": [
{
"core_params": {
"nf": 256,
"n_layers": 2
},
"n_samples": 16
},
{
"core_params": {
"nf": 256,
"n_layers": 2,
"f_chns": 256
},
"n_samples": 64
},
{
"core_params": {
"nf": 256,
"n_layers": 2,
"f_chns": 256
},
"n_samples": 128
}
]
},
"train": {
"max_epochs": 150,
"prune_epochs": [30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150],
"split_epochs": [10, 30, 70, 120],
"freeze_epochs": [50, 100],
"checkpoint_interval": 10,
"level_by_level": true,
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
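The configuration files in this commit are plain JSON. A minimal sketch of loading one and inspecting its per-level sampling setup; the file name and the direct `json.load` approach are assumptions (the trainer normally resolves configs by the `-c` name shown in launch.json):
```
import json

# Hypothetical: load the CNeRF config above and print its per-level setup.
with open("cnerf_voxels.json") as f:    # file name is an assumption
    cfg = json.load(f)

print(cfg["model"], "steps:", cfg["args"]["steps"])
for level, sub in enumerate(cfg["args"]["sub_models"]):
    print(f"  level {level}: nf={sub['core_params']['nf']}, n_samples={sub['n_samples']}")
```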
{
"model": "CNeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"space": "voxels",
"steps": [4, 16, 8],
"interp_on_coarse": true,
"sub_models": [
{
"core_params": {
"nf": 256,
"n_layers": 2
},
"n_samples": 16
},
{
"core_params": {
"nf": 256,
"n_layers": 2,
"f_chns": 256
},
"n_samples": 64
},
{
"core_params": {
"nf": 256,
"n_layers": 2,
"f_chns": 256
},
"n_samples": 128
}
]
},
"train": {
"max_epochs": 150,
"prune_epochs": [30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150],
"split_epochs": [10, 30, 70, 120],
"freeze_epochs": [50, 100],
"checkpoint_interval": 10,
"level_by_level": true,
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
{
"model": "CNeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core": "nerfadv",
"space": "voxels",
"steps": [4, 16, 8],
"interp_on_coarse": false,
"sub_models": [
{
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined"
},
"n_samples": 16
},
{
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined",
"f_chns": 256
},
"n_samples": 64
},
{
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined",
"f_chns": 256
},
"n_samples": 128
}
]
},
"train": {
"max_epochs": 150,
"prune_epochs": [30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150],
"split_epochs": [10, 30, 70, 120],
"freeze_epochs": [50, 100],
"checkpoint_interval": 10,
"level_by_level": true,
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
{
"model": "CNeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core": "nerfadv",
"space": "voxels",
"steps": [4, 16, 8],
"interp_on_coarse": true,
"sub_models": [
{
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined"
},
"n_samples": 16
},
{
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined",
"f_chns": 256
},
"n_samples": 64
},
{
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined",
"f_chns": 256
},
"n_samples": 128
}
]
},
"train": {
"max_epochs": 150,
"prune_epochs": [30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150],
"split_epochs": [10, 30, 70, 120],
"freeze_epochs": [50, 100],
"checkpoint_interval": 10,
"level_by_level": true,
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
{
"model": "NeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core_params": {
"nf": 256,
"n_layers": 8,
"skips": [4]
},
"space": "voxels",
"steps": [16, 64, 32],
"n_samples": 64
},
"train": {
"max_epochs": 50,
"prune_epochs": [10],
"split_epochs": [10, 30],
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
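For illustration, the `core_params` above (`nf`, `n_layers`, `skips`) describe an MLP trunk with a skip connection at the listed layer. A purely illustrative sketch of such a core, not the repository's actual NeRF implementation (the input width 63 used in the example assumes `encode_x: 10`, i.e. 3 + 3*2*10 channels):
```
import torch
import torch.nn as nn

class MLPCore(nn.Module):
    """Illustrative trunk for core_params like nf=256, n_layers=8, skips=[4]."""
    def __init__(self, in_chns: int, nf: int = 256, n_layers: int = 8, skips=(4,)):
        super().__init__()
        self.skips = set(skips)
        layers, chns = [], in_chns
        for i in range(n_layers):
            layers.append(nn.Linear(chns, nf))
            # where layer i+1 is a skip layer, the raw input is re-concatenated
            chns = nf + (in_chns if (i + 1) in self.skips else 0)
        self.layers = nn.ModuleList(layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = x
        for i, layer in enumerate(self.layers):
            h = torch.relu(layer(h))
            if (i + 1) in self.skips:
                h = torch.cat([h, x], -1)   # skip connection
        return h

# e.g. MLPCore(in_chns=63)(torch.rand(4, 63)).shape == torch.Size([4, 256])
```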
{
"model": "NeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core": "nerfadv",
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 4
},
"color_net": {
"nf": 256,
"n_layers": 3
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined"
},
"space": "voxels",
"steps": [16, 64, 32],
"n_samples": 64
},
"train": {
"max_epochs": 50,
"prune_epochs": [10],
"split_epochs": [10, 30],
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
{
"model": "NeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core_params": {
"nf": 256,
"n_layers": 2
},
"space": "voxels",
"steps": [4, 16, 8],
"n_samples": 16
},
"train": {
"max_epochs": 50,
"prune_epochs": [30, 40, 50],
"split_epochs": [10, 30],
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
{
"model": "SNeRFAdvance",
"args": {
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core": "nerfadv",
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined"
},
"space": "voxels",
"steps": [4, 16, 8],
"n_samples": 16,
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
},
"train": {
"max_epochs": 50,
"prune_epochs": [10],
"split_epochs": [10, 30],
"checkpoint_interval": 10
}
}
\ No newline at end of file
{
"model": "NeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core_params": {
"nf": 256,
"n_layers": 4
},
"space": "voxels",
"steps": [4, 16, 8],
"n_samples": 16
},
"train": {
"max_epochs": 50,
"prune_epochs": [30, 40, 50],
"split_epochs": [10, 20, 30],
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
}
}
\ No newline at end of file
{
"model": "NeRF",
"args": {
"spherical": true,
"color": "rgb",
"encode_x": 10,
"encode_d": 4,
"core": "nerfadv",
"core_params": {
"density_net": {
"nf": 256,
"n_layers": 2
},
"color_net": {
"nf": 256,
"n_layers": 2
},
"specular_net": {
"nf": 128,
"n_layers": 1
},
"appearance": "combined"
},
"space": "voxels",
"steps": [16, 64, 32],
"n_samples": 64,
"density_regularization_weight": 1e-4,
"density_regularization_scale": 1e4
},
"train": {
"max_epochs": 50,
"prune_epochs": [10],
"split_epochs": [20, -1],
"checkpoint_interval": 10
}
}
\ No newline at end of file