Commit 5699ccbf authored by Nianchen Deng

sync

parent 338ae906
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 2,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [16, 64, 32],
        "n_samples": 64,
        "perturb_sample": true,
        "raymarching_tolerance": 0,
        "raymarching_chunk_size": -1,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
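Each sub-dictionary above (density_net, color_net, specular_net) describes a plain MLP by width (nf), depth (n_layers), activation (act), and skip connections. A minimal sketch of how such a sub-config could be turned into a torch module; build_mlp, the file name, and the 63-channel input width (3 + 2*10*3 channels from the n_pot_encode=10 position encoding) are illustrative assumptions, not the repository's actual builder:

import json
import torch.nn as nn

def build_mlp(cfg: dict, in_dim: int, out_dim: int) -> nn.Sequential:
    # "nf", "n_layers" and "act" as in the config above; "skips" is ignored
    # here for brevity since these configs leave it empty.
    act = {"relu": nn.ReLU}[cfg["act"]]
    layers, dim = [], in_dim
    for _ in range(cfg["n_layers"]):
        layers += [nn.Linear(dim, cfg["nf"]), act()]
        dim = cfg["nf"]
    layers.append(nn.Linear(dim, out_dim))
    return nn.Sequential(*layers)

with open("snerfadv_voxels.json") as f:  # hypothetical file name
    args = json.load(f)["args"]
density_net = build_mlp(args["density_net"], in_dim=63, out_dim=1)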
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "newtype",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4,
        "specular_regularization_weight": 1e-1,
        "specular_regularization_scale": 1e4
    }
}
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "combined",
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 5,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 2,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 2,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 5,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 256,
            "n_layers": 8,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 256,
            "n_layers": 6,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 2,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
{
    "model": "SNeRFAdvance",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 512,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 512,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 256,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4
    }
}
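Widening the networks from 256 to 512 units roughly quadruples the dominant per-layer cost, since the hidden-to-hidden weight matrices grow as nf squared. A quick back-of-the-envelope count for the 512-wide density_net above (biases ignored; the 63-channel input assumes the n_pot_encode=10 position encoding):

in_dim, nf, n_layers = 63, 512, 4
weights = in_dim * nf + (n_layers - 1) * nf * nf
print(f"{weights:,}")  # 818,688 weights, vs. 212,736 at nf=256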
{
    "model": "SNeRFAdvanceX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 128,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 128,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "_nets/hr_r0.8s/snerfadv_voxels+ls6/checkpoint_50.tar",
        "n_samples": 256,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4,
        "multi_nets": 16
    }
}
{
    "model": "SNeRFAdvanceX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 128,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 128,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "_nets/train_t0.3/snerfadv_voxels+ls2/checkpoint_50.tar",
        "n_samples": 256,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4,
        "multi_nets": 4
    }
}
{
    "model": "SNeRFAdvanceX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "density_net": {
            "nf": 128,
            "n_layers": 4,
            "act": "relu",
            "skips": []
        },
        "color_net": {
            "nf": 128,
            "n_layers": 3,
            "act": "relu",
            "skips": []
        },
        "specular_net": {
            "nf": 128,
            "n_layers": 1,
            "act": "relu"
        },
        "n_featdim": 0,
        "space": "_nets/hr_t1.0s/snerfadv_voxels+ls2/checkpoint_50.tar",
        "n_samples": 256,
        "perturb_sample": true,
        "appearance": "combined",
        "density_color_connection": true,
        "density_regularization_weight": 1e-4,
        "density_regularization_scale": 1e4,
        "multi_nets": 8
    }
}
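In the SNeRFAdvanceX configs, space no longer names a fresh voxel grid but a trained checkpoint, and multi_nets splits that space across several smaller (nf=128) sub-networks. The actual assignment scheme lives in the repository; purely as an illustration, an even split of a 4x16x8 voxel grid (the steps used by the earlier configs) among four sub-networks could look like this:

import torch

steps = (4, 16, 8)                      # illustrative grid resolution
n_voxels = steps[0] * steps[1] * steps[2]
multi_nets = 4
net_of_voxel = torch.arange(n_voxels) // (n_voxels // multi_nets)
print(net_of_voxel.bincount())          # tensor([128, 128, 128, 128])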
{
    "model": "SNeRFX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "fc_params": {
            "nf": 128,
            "n_layers": 4,
            "activation": "relu",
            "skips": []
        },
        "n_featdim": 0,
        "space": "nets/train1/snerf_voxels/checkpoint_50.tar",
        "n_samples": 256,
        "perturb_sample": true,
        "raymarching_tolerance": 0,
        "raymarching_chunk_size": -1,
        "multi_nets": 4
    }
}
{
    "model": "SNeRFX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "fc_params": {
            "nf": 128,
            "n_layers": 4,
            "activation": "relu",
            "skips": []
        },
        "n_featdim": 0,
        "space": "nets/train_t0.3/snerf_voxels/checkpoint_50.tar",
        "n_samples": 256,
        "perturb_sample": true,
        "raymarching_tolerance": 0,
        "raymarching_chunk_size": -1,
        "multi_nets": 8
    }
}
{
    "model": "SNeRFX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "fc_params": {
            "nf": 128,
            "n_layers": 8,
            "activation": "relu",
            "skips": [4]
        },
        "n_featdim": 0,
        "space": "nets/train_t0.3/snerf_voxels/checkpoint_50.tar",
        "n_samples": 256,
        "perturb_sample": true,
        "raymarching_tolerance": 0,
        "raymarching_chunk_size": -1,
        "multi_nets": 4
    }
}
{
    "model": "SNeRFX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "fc_params": {
            "nf": 256,
            "n_layers": 4,
            "activation": "relu",
            "skips": []
        },
        "n_featdim": 0,
        "space": "nets/train1/snerf_voxels/checkpoint_50.tar",
        "n_samples": 256,
        "perturb_sample": true,
        "raymarching_tolerance": 0,
        "raymarching_chunk_size": -1,
        "multi_nets": 4
    }
}
{
    "model": "SNeRFX",
    "args": {
        "color": "rgb",
        "n_pot_encode": 10,
        "n_dir_encode": 4,
        "fc_params": {
            "nf": 256,
            "n_layers": 4,
            "activation": "relu",
            "skips": []
        },
        "n_featdim": 0,
        "space": "voxels",
        "steps": [4, 16, 8],
        "n_samples": 16,
        "perturb_sample": true,
        "raymarching_tolerance": 0,
        "raymarching_chunk_size": -1,
        "multi_nets": 4
    }
}
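All of these configs share n_pot_encode=10 and n_dir_encode=4, i.e. NeRF-style frequency encodings of sample position and view direction. A minimal sketch of that encoding; whether the repository also concatenates the raw input, as done here, is an assumption:

import math
import torch

def freq_encode(x: torch.Tensor, n_freqs: int) -> torch.Tensor:
    freqs = 2.0 ** torch.arange(n_freqs, device=x.device) * math.pi
    angles = x[..., None] * freqs                      # (..., 3, n_freqs)
    enc = torch.cat([angles.sin(), angles.cos()], -1)  # (..., 3, 2 * n_freqs)
    return torch.cat([x, enc.flatten(-2)], -1)

pts = torch.rand(1024, 3)
print(freq_encode(pts, 10).shape)  # torch.Size([1024, 63]): 3 + 2 * 10 * 3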
import os
import argparse
import torch
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import numpy as np
# from skimage import data
from pathlib import Path
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=int, default=0,
                        help='Which CUDA device to use.')
    opt = parser.parse_args()

    # Select device
    torch.cuda.set_device(opt.device)
    print("Set CUDA:%d as current device." % torch.cuda.current_device())
    torch.autograd.set_grad_enabled(False)
from data.spherical_view_syn import *
from configs.spherical_view_syn import SphericalViewSynConfig
from utils import netio
from utils import device
from utils import view
from utils import img
from utils import misc
from nets.modules import AlphaComposition, Sampler
import model as mdl
from modules import AlphaComposition, Sampler
datadir = 'data/__new/lobby_fovea_r360x80_t1.0/'
data_desc_file = 'train1.json'
datadir = Path('data/__new/classroom_fovea_r360x80_t0.6')
data_desc_file = 'r120x80.json'
net_config = 'fovea@snerffast4-rgb_e6_fc512x4_d2.00-50.00_s64_~p'
net_path = datadir + net_config + '/model-epoch_200.pth'
model_path = datadir / 'snerf_voxels/checkpoint_50.tar'
fov = 40
res = (256, 256)
pix_img_res = (256, 256)
center = (0, 0)
def load_net(path):
    print(path)
    config = SphericalViewSynConfig()
    config.from_id(net_config)
    config.sa['perturb_sample'] = False
    net = config.create_net().to(device.default())
    netio.load(path, net)
    return net


def load_net_by_name(name):
    for path in os.listdir(datadir):
        if path.startswith(name + '@'):
            return load_net(datadir + path)
    return None


def load_data_desc(data_desc_file) -> view.Trans:
    with open(datadir + data_desc_file, 'r', encoding='utf-8') as file:
        data_desc = json.loads(file.read())
@@ -85,7 +56,10 @@ cam = view.CameraParam({
    'cy': 0.5,
    'normalized': True
}, res, device=device.default())

net = load_net(net_path)
model, _ = mdl.load(model_path, {
    "perturb_sample": False
})

# Global states
x = y = None
@@ -159,7 +133,7 @@ app.layout = html.Div([
def plot_alpha_and_density(ray_o, ray_d):
    # colors, densities, depths = net.sample_and_infer(ray_o, ray_d, sampler=sampler)
    ret = net(ray_o, ray_d, ret_depth=True, debug=True)
    ret = model(ray_o, ray_d, extra_outputs=['depth', 'layers'])
    colors = ret['layers'][..., :3]
    densities = ret['sample_densities']
    depths = ret['sample_depths']
@@ -202,7 +176,7 @@ def plot_pixel_image(ray_o, ray_d, r=1):
    ], dim=-1).to(device.default())
    rays_d = pixel_point - rays_o
    rays_d /= rays_d.norm(dim=-1, keepdim=True)
    image = net(rays_o.view(-1, 3), rays_d.view(-1, 3))['color'] \
    image = model(rays_o.view(-1, 3), rays_d.view(-1, 3))['color'] \
        .view(1, *pix_img_res, -1).permute(0, 3, 1, 2)
    fig = px.imshow(img.torch2np(image)[0])
    return fig
@@ -230,10 +204,10 @@ def render_view(tx, ty, tz, rx, ry):
        torch.tensor(view.euler_to_matrix([ry, rx, 0]), device=device.default()).view(-1, 3, 3)
    )
    rays_o, rays_d = cam.get_global_rays(test_view, True)
    ret = net(rays_o.view(-1, 3), rays_d.view(-1, 3), debug=True)
    image = ret['color'].view(1, res[0], res[1], 3).permute(0, 3, 1, 2)
    layers = ret['layers'].view(res[0], res[1], -1, 4)
    layer_weights = ret['weight'].view(res[0], res[1], -1)
    ret = model(rays_o.view(-1, 3), rays_d.view(-1, 3), extra_outputs=['layers', 'weights'])
    image = ret['color'].view(1, *res, 3).permute(0, 3, 1, 2)
    layers = ret['layers'].view(*res, -1, 4)
    layer_weights = ret['weight'].view(*res, -1)
    fig = px.imshow(img.torch2np(image)[0])
    return fig
@@ -241,17 +215,13 @@ def render_view(tx, ty, tz, rx, ry):
def render_layer(layer):
    if layer is None:
        return None
    layer_data = torch.sum(layers[..., range(*layer), :3] * layer_weights[..., range(*layer), None],
                           dim=-2)
    # layer_data = layer_data[..., :3] * layer_data[..., 3:]
    layer_data = torch.sum((layers * layer_weights[..., None])[..., range(*layer), :3], dim=-2)
    fig = px.imshow(img.torch2np(layer_data))
    return fig
def view_pixel(fig, x, y, samples):
    sampler = Sampler(depth_range=(1, 50), n_samples=samples,
                      perturb_sample=False, spherical=True,
                      lindisp=True, inverse_r=True)
    sampler = model.sampler
    if x is None or y is None:
        return None
    p = torch.tensor([x, y], device=device.default())
......
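The plots above hinge on how per-sample densities become per-sample blending weights. A minimal sketch of the standard volume-rendering weights that an AlphaComposition-style module computes; the function name and interface here are assumptions, not the repository's exact API:

import torch

def composite_weights(densities: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
    # densities, deltas: (n_rays, n_samples); returns per-sample weights.
    alpha = 1.0 - torch.exp(-densities * deltas)
    trans = torch.cumprod(1.0 - alpha + 1e-10, dim=-1)
    trans = torch.cat([torch.ones_like(trans[..., :1]), trans[..., :-1]], dim=-1)
    return alpha * trans  # weights sum to at most 1 along each ray

w = composite_weights(torch.rand(2, 16), torch.full((2, 16), 0.1))
print(w.sum(-1))  # per-ray opacity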
import os
import json
import os
from pathlib import Path
from typing import Union
import utils.device
from .pano_dataset import PanoDataset
from .view_dataset import ViewDataset
@@ -8,16 +11,26 @@ from .view_dataset import ViewDataset
class DatasetFactory(object):

    @staticmethod
    def load(path, device=None, **kwargs):
    def get_dataset_desc_path(path: Union[Path, str]):
        if isinstance(path, str):
            path = Path(path)
        if path.suffix != ".json":
            if os.path.exists(f"{path}.json"):
                path = Path(f"{path}.json")
            else:
                path = path / "train.json"
        return path

    @staticmethod
    def load(path: Path, device=None, **kwargs):
        device = device or utils.device.default()
        data_dir = os.path.dirname(path)
        path = DatasetFactory.get_dataset_desc_path(path)
        with open(path, 'r', encoding='utf-8') as file:
            data_desc = json.loads(file.read())
        cwd = os.getcwd()
        os.chdir(data_dir)
        if 'type' in data_desc and data_desc['type'] == 'pano':
            dataset = PanoDataset(data_desc, device=device, **kwargs)
            data_desc: dict = json.loads(file.read())
        if data_desc.get('type') == 'pano':
            dataset_class = PanoDataset
        else:
            dataset = ViewDataset(data_desc, device=device, **kwargs)
        os.chdir(cwd)
        return dataset
            dataset_class = ViewDataset
        dataset = dataset_class(data_desc, root=path.absolute().parent, name=path.stem,
                                device=device, **kwargs)
        return dataset
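After this change, DatasetFactory.load resolves dataset descriptors from several path spellings instead of chdir-ing into the data directory. Roughly, with illustrative paths and an assumed module location:

from data.dataset_factory import DatasetFactory  # assumed module path

# "scene/train1.json" -> scene/train1.json   (explicit .json is used as-is)
# "scene/train1"      -> scene/train1.json   (suffix added if that file exists)
# "scene"             -> scene/train.json    (directory falls back to train.json)
dataset = DatasetFactory.load("data/__new/classroom_fovea_r360x80_t0.6/r120x80")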
from doctest import debug_script
from logging import *
import threading
import torch
import math
from logging import *
from typing import Dict
class Preloader(object):
@@ -75,17 +75,18 @@ class DataLoader(object):
            self.chunk_idx += 1
            self.current_chunk = self.chunks[self.chunk_idx]
            self.offset = 0
            self.indices = torch.randperm(len(self.current_chunk), device=self.device) \
            self.indices = torch.randperm(len(self.current_chunk)).to(device=self.device) \
                if self.shuffle else None
            if self.preloader is not None:
                self.preloader.preload_chunk(self.chunks[(self.chunk_idx + 1) % len(self.chunks)])

    def __init__(self, dataset, batch_size, *,
                 chunk_max_items=None, shuffle=False, enable_preload=True):
                 chunk_max_items=None, shuffle=False, enable_preload=True, **chunk_args):
        super().__init__()
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.chunk_args = chunk_args
        self.preloader = Preloader(self.dataset.device) if enable_preload else None
        self._init_chunks(chunk_max_items)
@@ -97,20 +98,18 @@ class DataLoader(object):
        return sum(math.ceil(len(chunk) / self.batch_size) for chunk in self.chunks)

    def _init_chunks(self, chunk_max_items):
        data = self.dataset.get_data()
        data: Dict[str, torch.Tensor] = self.dataset.get_data()
        if self.shuffle:
            rand_seq = torch.randperm(self.dataset.n_views, device=self.dataset.device)
            for key in data:
                data[key] = data[key][rand_seq]
            rand_seq = torch.randperm(self.dataset.n_views).to(device=self.dataset.device)
            data = {key: val[rand_seq] for key, val in data.items()}
        self.chunks = []
        n_chunks = 1 if chunk_max_items is None else \
            math.ceil(self.dataset.n_pixels / chunk_max_items)
        views_per_chunk = math.ceil(self.dataset.n_views / n_chunks)
        for offset in range(0, self.dataset.n_views, views_per_chunk):
            sel = slice(offset, offset + views_per_chunk)
            chunk_data = {}
            for key in data:
                chunk_data[key] = data[key][sel]
            self.chunks.append(self.dataset.Chunk(len(self.chunks), self.dataset, **chunk_data))
            chunk_data = {key: val[sel] for key, val in data.items()}
            self.chunks.append(self.dataset.Chunk(len(self.chunks), self.dataset,
                                                  chunk_data=chunk_data, **self.chunk_args))
        if self.preloader is not None:
            self.preloader.preload_chunk(self.chunks[0])
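The chunking above sizes chunks by pixel budget rather than view count: n_chunks comes from dividing the dataset's total pixel count by chunk_max_items, and whole views are then dealt evenly across chunks. A worked example with assumed numbers (120 views of 256x256 pixels):

import math

n_views = 120
n_pixels = n_views * 256 * 256                     # 7,864,320
chunk_max_items = 2_000_000                        # illustrative budget
n_chunks = math.ceil(n_pixels / chunk_max_items)   # 4
views_per_chunk = math.ceil(n_views / n_chunks)    # 30
print(n_chunks, views_per_chunk)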