Commit 6294701e authored by Nianchen Deng

sync

parent 2824f796
expname = mc_nerf_2021.01.17
basedir = ./logs
datadir = ./data/mc_nerf_2021.01.17
dataset_type = llff
factor = 1
llffhold = 8
N_rand = 1024
N_samples = 16
N_importance = 0
netdepth = 4
netwidth = 128
use_viewdirs = True
raw_noise_std = 1e0
# To run: conda env create -f environment.yml
name: nerf
channels:
  - conda-forge
dependencies:
  - python
  - pip
  - cudatoolkit
  - tensorflow-gpu
  - numpy
  - matplotlib
  - imageio
  - imageio-ffmpeg
  - configargparse
  - imagemagick
import numpy as np
import json


def load_pose_json(filename, near, far):
    info = None
    with open(filename, encoding='utf-8') as file:
        info = json.loads(file.read())
    poses = []
    for i in range(len(info["view_centers"])):
        pose = np.zeros((3, 5), dtype=np.float32)
        Rt = np.eye(4, dtype=np.float32)
        for j in range(9):
            Rt[j // 3, j % 3] = info["view_rots"][i][j]
        for j in range(3):
            Rt[j, 3] = info["view_centers"][i][j]
        # Rt = np.linalg.inv(Rt)
        Rt = Rt[:3, :4]
        # Swap/negate axes to match the LLFF camera convention
        Rt = np.concatenate([-Rt[:, 1:2], Rt[:, 0:1], -Rt[:, 2:3], Rt[:, 3:4]], 1)
        pose[0, :4] = Rt[0]
        pose[1, :4] = Rt[2]
        pose[2, :4] = Rt[1]
        pose[0, 4] = info["view_res"]["x"]
        pose[1, 4] = info["view_res"]["y"]
        pose[2, 4] = info["cam_params"]["fx"]
        pose = pose.flatten()
        pose = np.concatenate((pose, [near, far]))
        ## near/far would ideally come from the Unity exporter (Unity-DengNianchen);
        ## their exact meaning is unclear, but they appear to be the depth
        ## bounds of the scene geometry.
        # print(pose.shape)
        poses.append(pose)
    poses = np.stack(poses)
    print(poses.shape)
    return poses
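# For reference, each row built above follows the LLFF poses_bounds
# layout: a flattened 3x5 matrix ([R | t | resolution-and-focal column])
# followed by the near/far bounds -- 17 floats per view. A minimal
# unpacking sketch (this helper is illustrative, not part of the
# original pipeline):
def _unpack_pose_row(row):
    pose_3x5 = row[:15].reshape(3, 5)
    c2w = pose_3x5[:, :4]     # 3x4 camera-to-world matrix
    hwf = pose_3x5[:, 4]      # resolution / focal slots ([H, W, f] in LLFF)
    near, far = row[15], row[16]
    return c2w, hwf, near, far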
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('train_json', type=str,
                        help='input training set info')
    parser.add_argument('near', type=float,
                        help='scene depth range near')
    parser.add_argument('far', type=float,
                        help='scene depth range far')
    args = parser.parse_args()

    jsonfile = args.train_json
    near = args.near
    far = args.far
    poses = load_pose_json(jsonfile, near, far)
    np.save(args.train_json + "poses_bounds.npy", poses)
    np.savetxt(args.train_json + "poses_bounds.txt", poses)
import os
import tensorflow as tf
import numpy as np
import imageio
import json


trans_t = lambda t: tf.convert_to_tensor([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, t],
    [0, 0, 0, 1],
], dtype=tf.float32)

rot_phi = lambda phi: tf.convert_to_tensor([
    [1, 0, 0, 0],
    [0, tf.cos(phi), -tf.sin(phi), 0],
    [0, tf.sin(phi), tf.cos(phi), 0],
    [0, 0, 0, 1],
], dtype=tf.float32)

rot_theta = lambda th: tf.convert_to_tensor([
    [tf.cos(th), 0, -tf.sin(th), 0],
    [0, 1, 0, 0],
    [tf.sin(th), 0, tf.cos(th), 0],
    [0, 0, 0, 1],
], dtype=tf.float32)


def pose_spherical(theta, phi, radius):
    c2w = trans_t(radius)
    c2w = rot_phi(phi / 180. * np.pi) @ c2w
    c2w = rot_theta(theta / 180. * np.pi) @ c2w
    c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w
    return c2w
def load_blender_data(basedir, half_res=False, testskip=1):
    splits = ['train', 'val', 'test']
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)

    all_imgs = []
    all_poses = []
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        if s == 'train' or testskip == 0:
            skip = 1
        else:
            skip = testskip
        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, frame['file_path'] + '.png')
            imgs.append(imageio.imread(fname))
            poses.append(np.array(frame['transform_matrix']))
        imgs = (np.array(imgs) / 255.).astype(np.float32)  # keep all 4 channels (RGBA)
        poses = np.array(poses).astype(np.float32)
        counts.append(counts[-1] + imgs.shape[0])
        all_imgs.append(imgs)
        all_poses.append(poses)

    i_split = [np.arange(counts[i], counts[i + 1]) for i in range(3)]

    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)

    H, W = imgs[0].shape[:2]
    camera_angle_x = float(meta['camera_angle_x'])
    focal = .5 * W / np.tan(.5 * camera_angle_x)

    render_poses = tf.stack([pose_spherical(angle, -30.0, 4.0)
                             for angle in np.linspace(-180, 180, 40 + 1)[:-1]], 0)

    if half_res:
        H = H // 2
        W = W // 2
        focal = focal / 2.
        # Resize to the halved resolution instead of a hard-coded [400, 400]
        imgs = tf.image.resize_area(imgs, [H, W]).numpy()

    return imgs, poses, render_poses, [H, W, focal], i_split
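if __name__ == '__main__':
    # Illustrative smoke test (not part of the original file); the path
    # assumes the standard synthetic-scene layout with
    # transforms_{train,val,test}.json under basedir.
    imgs, poses, render_poses, hwf, i_split = load_blender_data(
        './data/nerf_synthetic/lego', half_res=True, testskip=8)
    i_train, i_val, i_test = i_split
    H, W, focal = hwf
    print(imgs.shape, poses.shape, H, W, focal)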
import os
import numpy as np
import imageio
def load_dv_data(scene='cube', basedir='/data/deepvoxels', testskip=8):

    def parse_intrinsics(filepath, trgt_sidelength, invert_y=False):
        # Get camera intrinsics
        with open(filepath, 'r') as file:
            f, cx, cy = list(map(float, file.readline().split()))[:3]
            grid_barycenter = np.array(list(map(float, file.readline().split())))
            near_plane = float(file.readline())
            scale = float(file.readline())
            height, width = map(float, file.readline().split())

            try:
                world2cam_poses = int(file.readline())
            except ValueError:
                world2cam_poses = None

        if world2cam_poses is None:
            world2cam_poses = False
        world2cam_poses = bool(world2cam_poses)

        print(cx, cy, f, height, width)

        cx = cx / width * trgt_sidelength
        cy = cy / height * trgt_sidelength
        f = trgt_sidelength / height * f

        fx = f
        if invert_y:
            fy = -f
        else:
            fy = f

        # Build the intrinsic matrices
        full_intrinsic = np.array([[fx, 0., cx, 0.],
                                   [0., fy, cy, 0],
                                   [0., 0, 1, 0],
                                   [0, 0, 0, 1]])

        return full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses

    def load_pose(filename):
        assert os.path.isfile(filename)
        nums = open(filename).read().split()
        return np.array([float(x) for x in nums]).reshape([4, 4]).astype(np.float32)

    H = 512
    W = 512
    deepvoxels_base = '{}/train/{}/'.format(basedir, scene)

    full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses = parse_intrinsics(
        os.path.join(deepvoxels_base, 'intrinsics.txt'), H)
    print(full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses)

    focal = full_intrinsic[0, 0]
    print(H, W, focal)

    def dir2poses(posedir):
        poses = np.stack(
            [load_pose(os.path.join(posedir, f)) for f in sorted(os.listdir(posedir)) if f.endswith('txt')], 0)
        transf = np.array([
            [1, 0, 0, 0],
            [0, -1, 0, 0],
            [0, 0, -1, 0],
            [0, 0, 0, 1.],
        ])
        poses = poses @ transf
        poses = poses[:, :3, :4].astype(np.float32)
        return poses

    posedir = os.path.join(deepvoxels_base, 'pose')
    poses = dir2poses(posedir)
    testposes = dir2poses('{}/test/{}/pose'.format(basedir, scene))
    testposes = testposes[::testskip]
    valposes = dir2poses('{}/validation/{}/pose'.format(basedir, scene))
    valposes = valposes[::testskip]

    imgfiles = [f for f in sorted(os.listdir(os.path.join(deepvoxels_base, 'rgb'))) if f.endswith('png')]
    imgs = np.stack([imageio.imread(os.path.join(deepvoxels_base, 'rgb', f)) / 255. for f in imgfiles], 0).astype(np.float32)

    testimgd = '{}/test/{}/rgb'.format(basedir, scene)
    imgfiles = [f for f in sorted(os.listdir(testimgd)) if f.endswith('png')]
    testimgs = np.stack([imageio.imread(os.path.join(testimgd, f)) / 255. for f in imgfiles[::testskip]], 0).astype(np.float32)

    valimgd = '{}/validation/{}/rgb'.format(basedir, scene)
    imgfiles = [f for f in sorted(os.listdir(valimgd)) if f.endswith('png')]
    valimgs = np.stack([imageio.imread(os.path.join(valimgd, f)) / 255. for f in imgfiles[::testskip]], 0).astype(np.float32)

    all_imgs = [imgs, valimgs, testimgs]
    counts = [0] + [x.shape[0] for x in all_imgs]
    counts = np.cumsum(counts)
    i_split = [np.arange(counts[i], counts[i + 1]) for i in range(3)]

    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate([poses, valposes, testposes], 0)

    render_poses = testposes

    print(poses.shape, imgs.shape)

    return imgs, poses, render_poses, [H, W, focal], i_split
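if __name__ == '__main__':
    # Illustrative smoke test (not part of the original file) using the
    # function's own defaults; the base directory is an assumption.
    imgs, poses, render_poses, hwf, i_split = load_dv_data(
        scene='cube', basedir='./data/deepvoxels', testskip=8)
    print(imgs.shape, poses.shape)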
import numpy as np
import os, imageio
########## Slightly modified version of LLFF data loading code
########## see https://github.com/Fyusion/LLFF for original
def _minify(basedir, factors=[], resolutions=[]):
    needtoload = False
    for r in factors:
        imgdir = os.path.join(basedir, 'images_{}'.format(r))
        if not os.path.exists(imgdir):
            needtoload = True
    for r in resolutions:
        imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
        if not os.path.exists(imgdir):
            needtoload = True
    if not needtoload:
        return

    from subprocess import check_output

    imgdir = os.path.join(basedir, 'images')
    imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
    imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
    imgdir_orig = imgdir

    wd = os.getcwd()

    for r in factors + resolutions:
        if isinstance(r, int):
            name = 'images_{}'.format(r)
            resizearg = '{}%'.format(100. / r)
        else:
            name = 'images_{}x{}'.format(r[1], r[0])
            resizearg = '{}x{}'.format(r[1], r[0])
        imgdir = os.path.join(basedir, name)
        if os.path.exists(imgdir):
            continue

        print('Minifying', r, basedir)

        os.makedirs(imgdir)
        check_output('cp {}/* {}'.format(imgdir_orig, imgdir), shell=True)

        ext = imgs[0].split('.')[-1]
        args = ' '.join(['mogrify', '-resize', resizearg, '-format', 'png', '*.{}'.format(ext)])
        print(args)
        os.chdir(imgdir)
        check_output(args, shell=True)
        os.chdir(wd)

        if ext != 'png':
            check_output('rm {}/*.{}'.format(imgdir, ext), shell=True)
            print('Removed duplicates')
        print('Done')
def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True):
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy'))
    poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])
    bds = poses_arr[:, -2:].transpose([1, 0])

    img0 = [os.path.join(basedir, 'images', f) for f in sorted(os.listdir(os.path.join(basedir, 'images')))
            if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]
    sh = imageio.imread(img0).shape

    sfx = ''

    if factor is not None:
        sfx = '_{}'.format(factor)
        _minify(basedir, factors=[factor])
    elif height is not None:
        factor = sh[0] / float(height)
        width = int(sh[1] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    elif width is not None:
        factor = sh[1] / float(width)
        height = int(sh[0] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    else:
        factor = 1

    imgdir = os.path.join(basedir, 'images' + sfx)
    if not os.path.exists(imgdir):
        print(imgdir, 'does not exist, returning')
        return

    imgfiles = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))
                if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')]
    if poses.shape[-1] != len(imgfiles):
        print('Mismatch between imgs {} and poses {} !!!!'.format(len(imgfiles), poses.shape[-1]))
        return

    sh = imageio.imread(imgfiles[0]).shape
    poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
    poses[2, 4, :] = poses[2, 4, :] * 1. / factor

    if not load_imgs:
        return poses, bds

    def imread(f):
        if f.endswith('png'):
            return imageio.imread(f, ignoregamma=True)
        else:
            return imageio.imread(f)

    imgs = [imread(f)[..., :3] / 255. for f in imgfiles]
    imgs = np.stack(imgs, -1)

    print('Loaded image data', imgs.shape, poses[:, -1, 0])
    return poses, bds, imgs
def normalize(x):
    return x / np.linalg.norm(x)


def viewmatrix(z, up, pos):
    vec2 = normalize(z)
    vec1_avg = up
    vec0 = normalize(np.cross(vec1_avg, vec2))
    vec1 = normalize(np.cross(vec2, vec0))
    m = np.stack([vec0, vec1, vec2, pos], 1)
    return m
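# viewmatrix builds a right-handed orthonormal camera frame from a
# forward direction, an up hint, and a position; `up` only biases the
# frame, since vec1 is recomputed by the cross products. A small
# illustrative check (this helper is an addition, with assumed inputs):
def _check_viewmatrix_orthonormal():
    # With z along +Z and up along +Y the rotation block is the identity.
    m = viewmatrix(np.array([0., 0., 1.]), np.array([0., 1., 0.]), np.zeros(3))
    return np.allclose(m[:, :3], np.eye(3))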
def ptstocam(pts, c2w):
    tt = np.matmul(c2w[:3, :3].T, (pts - c2w[:3, 3])[..., np.newaxis])[..., 0]
    return tt


def poses_avg(poses):
    hwf = poses[0, :3, -1:]
    center = poses[:, :3, 3].mean(0)
    vec2 = normalize(poses[:, :3, 2].sum(0))
    up = poses[:, :3, 1].sum(0)
    c2w = np.concatenate([viewmatrix(vec2, up, center), hwf], 1)
    return c2w


def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N):
    render_poses = []
    rads = np.array(list(rads) + [1.])
    hwf = c2w[:, 4:5]

    for theta in np.linspace(0., 2. * np.pi * rots, N + 1)[:-1]:
        c = np.dot(c2w[:3, :4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) * rads)
        z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))
        render_poses.append(np.concatenate([viewmatrix(z, up, c), hwf], 1))
    return render_poses
def recenter_poses(train_poses, poses, hasValue=False, c2w=None):
    poses_ = poses + 0
    bottom = np.reshape([0, 0, 0, 1.], [1, 4])
    if not hasValue:
        c2w = poses_avg(train_poses)
    c2w = np.concatenate([c2w[:3, :4], bottom], -2)
    bottom = np.tile(np.reshape(bottom, [1, 1, 4]), [poses.shape[0], 1, 1])
    poses = np.concatenate([poses[:, :3, :4], bottom], -2)

    poses = np.linalg.inv(c2w) @ poses
    poses_[:, :3, :4] = poses[:, :3, :4]
    poses = poses_
    return poses
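# recenter_poses left-multiplies every pose by the inverse of the
# average training pose, so the average camera ends up at the origin
# looking down the mean viewing direction. An illustrative invariant
# (this helper is an addition; `train_poses` is any [N, 3, 5] pose
# array as used above):
def _check_recenter_identity(train_poses):
    # Recentering a pose set by its own average leaves an average pose
    # that is numerically the identity transform.
    avg = poses_avg(recenter_poses(train_poses, train_poses))
    return (np.allclose(avg[:3, :3], np.eye(3), atol=1e-5)
            and np.allclose(avg[:3, 3], 0., atol=1e-5))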
#####################


def spherify_poses(poses, bds):

    p34_to_44 = lambda p: np.concatenate([p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1)

    rays_d = poses[:, :3, 2:3]
    rays_o = poses[:, :3, 3:4]

    def min_line_dist(rays_o, rays_d):
        A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])
        b_i = -A_i @ rays_o
        pt_mindist = np.squeeze(-np.linalg.inv((np.transpose(A_i, [0, 2, 1]) @ A_i).mean(0)) @ (b_i).mean(0))
        return pt_mindist

    pt_mindist = min_line_dist(rays_o, rays_d)

    center = pt_mindist
    up = (poses[:, :3, 3] - center).mean(0)

    vec0 = normalize(up)
    vec1 = normalize(np.cross([.1, .2, .3], vec0))
    vec2 = normalize(np.cross(vec0, vec1))
    pos = center
    c2w = np.stack([vec1, vec2, vec0, pos], 1)

    poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4])

    rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))

    sc = 1. / rad
    poses_reset[:, :3, 3] *= sc
    bds *= sc
    rad *= sc

    centroid = np.mean(poses_reset[:, :3, 3], 0)
    zh = centroid[2]
    radcircle = np.sqrt(rad**2 - zh**2)
    new_poses = []

    for th in np.linspace(0., 2. * np.pi, 120):
        camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
        up = np.array([0, 0, -1.])

        vec2 = normalize(camorigin)
        vec0 = normalize(np.cross(vec2, up))
        vec1 = normalize(np.cross(vec2, vec0))
        pos = camorigin
        p = np.stack([vec0, vec1, vec2, pos], 1)
        new_poses.append(p)

    new_poses = np.stack(new_poses, 0)

    new_poses = np.concatenate([new_poses, np.broadcast_to(poses[0, :3, -1:], new_poses[:, :3, -1:].shape)], -1)
    poses_reset = np.concatenate([poses_reset[:, :3, :4], np.broadcast_to(poses[0, :3, -1:], poses_reset[:, :3, -1:].shape)], -1)

    return poses_reset, new_poses, bds
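# spherify_poses finds the point closest to all camera optical axes by
# least squares, recenters the poses on it, and rescales so the
# root-mean-square distance of the camera centers from that point is 1;
# new_poses is a 120-frame circular path at the cameras' mean height.
# An illustrative check of the rescaling invariant (this helper is an
# addition; inputs are assumed to be the usual pose/bounds arrays):
def _check_spherify_radius(poses, bds):
    # Returns the RMS radius of the spherified camera centers (~1.0).
    # bds is copied because spherify_poses rescales it in place.
    poses_reset, _, _ = spherify_poses(poses, bds.copy())
    return np.sqrt(np.mean(np.sum(poses_reset[:, :3, 3] ** 2, -1)))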
def load_llff_data(basedir, factor=8, recenter=True, bd_factor=.75, spherify=False, path_zflat=False, test_index=1):

    poses, bds, imgs = _load_data(basedir, factor=factor)  # factor=8 downsamples original imgs by 8x
    print('Loaded', basedir, bds.min(), bds.max())
    print(test_index, imgs.shape[0] / test_index)

    # Correct rotation matrix ordering and move variable dim to axis 0
    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
    imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
    images = imgs
    bds = np.moveaxis(bds, -1, 0).astype(np.float32)

    # Rescale if bd_factor is provided
    sc = 1. if bd_factor is None else 1. / (bds.min() * bd_factor)
    poses[:, :3, 3] *= sc
    bds *= sc

    if recenter:
        print("_load_data poses_arr.shape", poses.shape)
        # Recenter on the training views only: the first
        # imgs.shape[0] / test_index poses are treated as held out
        train_arr = poses[int(imgs.shape[0] / test_index):, :]
        print("_load_data train_arr.shape", train_arr.shape)
        if os.path.isfile(os.path.join(basedir, 'c2w_poses_avg.txt')):
            c2w = np.loadtxt(os.path.join(basedir, 'c2w_poses_avg.txt'))
            print("load c2w", c2w)
            poses = recenter_poses(train_arr, poses, True, c2w)
        else:
            poses = recenter_poses(train_arr, poses)
            c2w = poses_avg(train_arr)
            np.savetxt(os.path.join(basedir, 'c2w_poses_avg.txt'), c2w)
        print('recentered', c2w.shape)
        print(c2w[:3, :4])

    if spherify:
        poses, render_poses, bds = spherify_poses(poses, bds)
    else:
        c2w = poses_avg(poses)
        print('recentered', c2w.shape)
        print(c2w[:3, :4])

        ## Get spiral
        # Get average pose
        up = normalize(poses[:, :3, 1].sum(0))

        # Find a reasonable "focus depth" for this dataset
        close_depth, inf_depth = bds.min() * .9, bds.max() * 5.
        dt = .75
        mean_dz = 1. / (((1. - dt) / close_depth + dt / inf_depth))
        focal = mean_dz

        # Get radii for spiral path
        shrink_factor = .8
        zdelta = close_depth * .2
        tt = poses[:, :3, 3]  # ptstocam(poses[:3,3,:].T, c2w).T
        rads = np.percentile(np.abs(tt), 90, 0)
        c2w_path = c2w
        N_views = 120
        N_rots = 2
        if path_zflat:
            # zloc = np.percentile(tt, 10, 0)[2]
            zloc = -close_depth * .1
            c2w_path[:3, 3] = c2w_path[:3, 3] + zloc * c2w_path[:3, 2]
            rads[2] = 0.
            N_rots = 1
            N_views = N_views // 2

        # Generate poses for spiral path
        render_poses = render_path_spiral(c2w_path, up, rads, focal, zdelta, zrate=.5, rots=N_rots, N=N_views)

    render_poses = np.array(render_poses).astype(np.float32)

    c2w = poses_avg(poses)
    print('Data:')
    print(poses.shape, images.shape, bds.shape)

    dists = np.sum(np.square(c2w[:3, 3] - poses[:, :3, 3]), -1)
    i_test = np.argmin(dists)
    print('HOLDOUT view is', i_test)

    images = images.astype(np.float32)
    poses = poses.astype(np.float32)

    # NOTE: this overrides the spiral path computed above; the input
    # poses are returned as the render path instead.
    render_poses = poses

    return images, poses, bds, render_poses, i_test
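# An illustrative call matching the barbershop config further down
# (factor = 1, spherify = False); this helper is an addition, and the
# directory is assumed to contain images/ and poses_bounds.npy:
def _demo_load_llff():
    images, poses, bds, render_poses, i_test = load_llff_data(
        './data/barbershop_2021.05.04', factor=1, recenter=True,
        bd_factor=.75, spherify=False)
    print(images.shape, poses.shape, bds.shape, 'holdout:', i_test)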
def load_demo_llff_data(basedir, demopath='demo_poses.npy', factor=8, bd_factor=.75):
    poses_arr = np.load(os.path.join(basedir, demopath))
    poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])
    bds = poses_arr[:, -2:].transpose([1, 0])

    sh = [1600, 1440, 4]  # hard-coded in place of imageio.imread(imgfiles[0]).shape
    poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
    poses[2, 4, :] = poses[2, 4, :] * 1. / factor

    # Correct rotation matrix ordering and move variable dim to axis 0
    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
    bds = np.moveaxis(bds, -1, 0).astype(np.float32)

    # Rescale if bd_factor is provided
    sc = 1. if bd_factor is None else 1. / (bds.min() * bd_factor)
    poses[:, :3, 3] *= sc
    bds *= sc

    if os.path.isfile(os.path.join(basedir, 'c2w_poses_avg.txt')):
        c2w = np.loadtxt(os.path.join(basedir, 'c2w_poses_avg.txt'))
        print("load c2w", c2w)
        poses = recenter_poses(poses, poses, True, c2w)
    else:
        print("FAILED TO FIND C2W")

    demo_poses = poses.astype(np.float32)
    return demo_poses, bds
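if __name__ == '__main__':
    # Illustrative smoke test (an addition, not in the original file);
    # the directory is an assumption, and the demo file name comes from
    # the configs below (demo_path = demo_poses.npy).
    demo_poses, bds = load_demo_llff_data(
        './data/barbershop_2021.05.04', demopath='demo_poses.npy', factor=1)
    print(demo_poses.shape, bds.shape)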
N_importance = 64
N_rand = 1024
N_samples = 64
basedir = ./logs
chunk = 32768
config = config_bar0514.txt
datadir = ./data/barbershop_2021.05.04
dataset_type = llff
demo_path = demo_poses.npy
expname = bar0514
factor = 1
ft_path = None
half_res = False
i_embed = 0
i_img = 500
i_print = 100
i_testset = 50000
i_video = 50000
i_weights = 10000
lindisp = False
llffhold = 20
lrate = 0.0005
lrate_decay = 250
multires = 10
multires_views = 4
netchunk = 65536
netdepth = 8
netdepth_fine = 8
netwidth = 256
netwidth_fine = 256
no_batching = False
no_ndc = False
no_reload = False
perturb = 1.0
precrop_frac = 0.5
precrop_iters = 0
random_seed = None
raw_noise_std = 1.0
render_demo = False
render_factor = 0
render_only = True
render_test = True
shape = greek
spherify = False
testskip = 8
use_viewdirs = True
white_bkgd = False
expname = bar0514
basedir = ./logs
datadir = ./data/barbershop_2021.05.04
dataset_type = llff
factor = 1
llffhold = 20
N_rand = 1024
N_samples = 64
N_importance = 64
use_viewdirs = True
raw_noise_std = 1e0
N_importance = 0
N_rand = 1024
N_samples = 16
basedir = ./logs
chunk = 32768
config = config_bedroom.txt
datadir = ./data/bedroom_nerf_2021.01.18
dataset_type = llff
demo_path = demo_poses.npy
expname = bedroom_nerf_2021.01.18
factor = 1
ft_path = None
half_res = False
i_embed = 0
i_img = 500
i_print = 100
i_testset = 50000
i_video = 50000
i_weights = 10000
lindisp = False
llffhold = 8
lrate = 0.0005
lrate_decay = 250
multires = 10
multires_views = 4
netchunk = 65536
netdepth = 4
netdepth_fine = 8
netwidth = 128
netwidth_fine = 256
no_batching = False
no_ndc = False
no_reload = False
perturb = 1.0
precrop_frac = 0.5
precrop_iters = 0
random_seed = None
raw_noise_std = 1.0
render_demo = True
render_factor = 0
render_only = False
render_test = False
shape = greek
spherify = False
testskip = 8
use_viewdirs = True
white_bkgd = False
expname = bedroom_nerf_2021.01.18
basedir = ./logs
datadir = ./data/bedroom_nerf_2021.01.18
dataset_type = llff
factor = 1
llffhold = 8
N_rand = 1024
N_samples = 16
N_importance = 0
netdepth = 4
netwidth = 128
use_viewdirs = True
raw_noise_std = 1e0
N_importance = 0
N_rand = 1024
N_samples = 16
basedir = ./logs
chunk = 32768
config = config_gallery.txt
datadir = ./data/gallery_nerf_2021.01.20
dataset_type = llff
demo_path = demo_poses.npy
expname = gallery_nerf_2021.01.20
factor = 1
ft_path = None
half_res = False
i_embed = 0
i_img = 500
i_print = 100
i_testset = 50000
i_video = 50000
i_weights = 10000
lindisp = False
llffhold = 8
lrate = 0.0005
lrate_decay = 250
multires = 10
multires_views = 4
netchunk = 65536
netdepth = 4
netdepth_fine = 8
netwidth = 128
netwidth_fine = 256
no_batching = False
no_ndc = False
no_reload = False
perturb = 1.0
precrop_frac = 0.5
precrop_iters = 0
random_seed = None
raw_noise_std = 1.0
render_demo = True
render_factor = 0
render_only = False
render_test = False
shape = greek
spherify = False
testskip = 8
use_viewdirs = True
white_bkgd = False
expname = gallery_nerf_2021.01.20
basedir = ./logs
datadir = ./data/gallery_nerf_2021.01.20
dataset_type = llff
factor = 1
llffhold = 8
N_rand = 1024
N_samples = 16
N_importance = 0
netdepth = 4
netwidth = 128
use_viewdirs = True
raw_noise_std = 1e0
N_importance = 0
N_rand = 1024
N_samples = 16
basedir = ./logs
chunk = 32768
config = config_gas.txt
datadir = ./data/gas_nerf_2021.01.17
dataset_type = llff
demo_path = demo_poses.npy
expname = gas_nerf_2021.01.17
factor = 1
ft_path = None
half_res = False
i_embed = 0
i_img = 500
i_print = 100
i_testset = 50000
i_video = 50000
i_weights = 10000
lindisp = False
llffhold = 8
lrate = 0.0005
lrate_decay = 250
multires = 10
multires_views = 4
netchunk = 65536
netdepth = 4
netdepth_fine = 8
netwidth = 128
netwidth_fine = 256
no_batching = False
no_ndc = False
no_reload = False
perturb = 1.0
precrop_frac = 0.5
precrop_iters = 0
random_seed = None
raw_noise_std = 1.0
render_demo = True
render_factor = 0
render_only = False
render_test = False
shape = greek
spherify = False
testskip = 8
use_viewdirs = True
white_bkgd = False