Commit 2a1d7973 authored by Nianchen Deng

For batch inference evaluation on multiple hosts

parent 7e0ade21
@@ -2,23 +2,16 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Set CUDA:2 as current device.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import sys\n",
     "import os\n",
     "import torch\n",
     "\n",
-    "sys.path.append(os.path.abspath(sys.path[0] + '/../'))\n",
+    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
+    "sys.path.append(rootdir)\n",
     "torch.cuda.set_device(2)\n",
     "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
     "torch.autograd.set_grad_enabled(False)\n",
@@ -74,24 +67,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Change working directory to /home/dengnc/deep_view_syn/data/__0_user_study/us_mc_all_in_one\n",
-      "Load net from fovea@nmsl-rgb_e10_fc128x4_d1-50_s32.pth ...\n",
-      "Load net from periph@nnmsl-rgb_e10_fc64x4_d1-50_s16.pth ...\n",
-      "Dataset loaded.\n",
-      "views: [110]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "scene = 'mc'\n",
-    "os.chdir(sys.path[0] + '/../data/' + scenes[scene])\n",
+    "os.chdir(os.path.join(rootdir, 'data/' + scenes[scene]))\n",
     "print('Change working directory to ', os.getcwd())\n",
     "\n",
     "fovea_net = load_net(find_file('fovea'))\n",
@@ -128,7 +109,7 @@
     " # ), test_view.r), mono_trans=test_view, ret_raw=True)\n",
     "#plot_figures(images, center)\n",
     "\n",
-    " outputdir = '/home/dengnc/deep_view_syn/data/__1_eval/output_mono_periph/ref_as_right_eye/%s/' % scene\n",
+    " outputdir = '../__1_eval/output_mono_periph/ref_as_right_eye/%s/' % scene\n",
     " misc.create_dir(outputdir)\n",
     " #for key in images:\n",
     " key = 'blended'\n",
@@ -137,21 +118,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "ename": "TypeError",
-     "evalue": "gen() takes 3 positional arguments but 4 were given",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
-      "\u001b[0;32m<ipython-input-5-245bea67ea44>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mcenter\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mtest_view\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mviews\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mimages\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgen\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcenter\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_view\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;31m#plot_figures(images, center)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
-      "\u001b[0;31mTypeError\u001b[0m: gen() takes 3 positional arguments but 4 were given"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import numpy as np\n",
     "gaze_idx = 0\n",
......
@@ -2,24 +2,17 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Set CUDA:2 as current device.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import sys\n",
     "import os\n",
     "import torch\n",
     "import matplotlib.pyplot as plt\n",
     "\n",
-    "sys.path.append(os.path.abspath(sys.path[0] + '/../'))\n",
+    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
+    "sys.path.append(rootdir)\n",
     "torch.cuda.set_device(2)\n",
     "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
     "\n",
@@ -97,43 +90,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Change working directory to /home/dengnc/deep_view_syn/data/lobby_all_in_one\n",
-      "==== Config fovea ====\n",
-      "Net type: nmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 128, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 32, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from fovea@nmsl-rgb_e10_fc128x4_d1.00-50.00_s32.pth ...\n",
-      "==== Config periph ====\n",
-      "Net type: nnmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 64, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 16, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from periph@nnmsl-rgb_e10_fc64x4_d1.00-50.00_s16.pth ...\n",
-      "Dataset loaded.\n",
-      "views: [13]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "#os.chdir(sys.path[0] + '/../data/__0_user_study/us_gas_all_in_one')\n",
-    "#os.chdir(sys.path[0] + '/../data/__0_user_study/us_mc_all_in_one')\n",
-    "os.chdir(sys.path[0] + '/../data/lobby_all_in_one')\n",
+    "#os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_gas_all_in_one'))\n",
+    "#os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_mc_all_in_one'))\n",
+    "os.chdir(os.path.join(rootdir, 'data/lobby_all_in_one'))\n",
     "print('Change working directory to ', os.getcwd())\n",
     "torch.autograd.set_grad_enabled(False)\n",
     "\n",
......
@@ -19,7 +19,8 @@
     "import torch\n",
     "import matplotlib.pyplot as plt\n",
     "\n",
-    "sys.path.append(os.path.abspath(sys.path[0] + '/../'))\n",
+    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
+    "sys.path.append(rootdir)\n",
     "torch.cuda.set_device(2)\n",
     "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
     "\n",
@@ -97,43 +98,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Change working directory to /home/dengnc/deep_view_syn/data/__0_user_study/us_gas_all_in_one\n",
-      "==== Config fovea ====\n",
-      "Net type: nmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 128, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 32, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from fovea@nmsl-rgb_e10_fc128x4_d1-50_s32.pth ...\n",
-      "==== Config periph ====\n",
-      "Net type: nnmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 64, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 16, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from periph@nnmsl-rgb_e10_fc64x4_d1-50_s16.pth ...\n",
-      "Dataset loaded.\n",
-      "views: [110]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "os.chdir(sys.path[0] + '/../data/__0_user_study/us_gas_all_in_one')\n",
-    "#os.chdir(sys.path[0] + '/../data/__0_user_study/us_mc_all_in_one')\n",
-    "#os.chdir(sys.path[0] + '/../data/lobby_all_in_one')\n",
+    "os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_gas_all_in_one'))\n",
+    "#os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_mc_all_in_one'))\n",
+    "#os.chdir(os.path.join(rootdir, 'data/lobby_all_in_one'))\n",
     "print('Change working directory to ', os.getcwd())\n",
     "torch.autograd.set_grad_enabled(False)\n",
     "\n",
@@ -156,133 +127,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "GetRays: 580.4ms\n",
-      "Sample: 2.1ms\n",
-      "Encode: 2.2ms\n",
-      "Rays: tensor([[-0.1711, 0.1711, 0.9702],\n",
-      " [-0.1685, 0.1711, 0.9707],\n",
-      " [-0.1659, 0.1713, 0.9712],\n",
-      " ...,\n",
-      " [ 0.1633, -0.1687, 0.9722],\n",
-      " [ 0.1660, -0.1687, 0.9717],\n",
-      " [ 0.1686, -0.1686, 0.9712]], device='cuda:2')\n",
-      "Spherical coords: tensor([[[1.0000, 1.7454, 1.3988],\n",
-      " [0.9684, 1.7454, 1.3988],\n",
-      " [0.9368, 1.7454, 1.3988],\n",
-      " ...,\n",
-      " [0.0832, 1.7454, 1.3988],\n",
-      " [0.0516, 1.7454, 1.3988],\n",
-      " [0.0200, 1.7454, 1.3988]],\n",
-      "\n",
-      " [[1.0000, 1.7426, 1.3988],\n",
-      " [0.9684, 1.7426, 1.3988],\n",
-      " [0.9368, 1.7426, 1.3988],\n",
-      " ...,\n",
-      " [0.0832, 1.7426, 1.3988],\n",
-      " [0.0516, 1.7426, 1.3988],\n",
-      " [0.0200, 1.7426, 1.3988]],\n",
-      "\n",
-      " [[1.0000, 1.7400, 1.3987],\n",
-      " [0.9684, 1.7400, 1.3987],\n",
-      " [0.9368, 1.7400, 1.3987],\n",
-      " ...,\n",
-      " [0.0832, 1.7400, 1.3987],\n",
-      " [0.0516, 1.7400, 1.3987],\n",
-      " [0.0200, 1.7400, 1.3987]],\n",
-      "\n",
-      " ...,\n",
-      "\n",
-      " [[1.0000, 1.4043, 1.7403],\n",
-      " [0.9684, 1.4043, 1.7403],\n",
-      " [0.9368, 1.4043, 1.7403],\n",
-      " ...,\n",
-      " [0.0832, 1.4043, 1.7403],\n",
-      " [0.0516, 1.4043, 1.7403],\n",
-      " [0.0200, 1.4043, 1.7403]],\n",
-      "\n",
-      " [[1.0000, 1.4016, 1.7403],\n",
-      " [0.9684, 1.4016, 1.7403],\n",
-      " [0.9368, 1.4016, 1.7403],\n",
-      " ...,\n",
-      " [0.0832, 1.4016, 1.7403],\n",
-      " [0.0516, 1.4016, 1.7403],\n",
-      " [0.0200, 1.4016, 1.7403]],\n",
-      "\n",
-      " [[1.0000, 1.3989, 1.7402],\n",
-      " [0.9684, 1.3989, 1.7402],\n",
-      " [0.9368, 1.3989, 1.7402],\n",
-      " ...,\n",
-      " [0.0832, 1.3989, 1.7402],\n",
-      " [0.0516, 1.3989, 1.7402],\n",
-      " [0.0200, 1.3989, 1.7402]]], device='cuda:2')\n",
-      "Depths: tensor([[ 1.0001, 1.0327, 1.0675, ..., 12.0161, 19.3760, 50.0026],\n",
-      " [ 1.0000, 1.0327, 1.0675, ..., 12.0159, 19.3757, 50.0017],\n",
-      " [ 1.0000, 1.0326, 1.0675, ..., 12.0151, 19.3744, 49.9984],\n",
-      " ...,\n",
-      " [ 0.9999, 1.0325, 1.0674, ..., 12.0140, 19.3726, 49.9938],\n",
-      " [ 0.9999, 1.0326, 1.0674, ..., 12.0144, 19.3732, 49.9954],\n",
-      " [ 1.0000, 1.0326, 1.0675, ..., 12.0152, 19.3745, 49.9987]],\n",
-      " device='cuda:2')\n",
-      "Encoded: tensor([[[ 1.0000, 1.7454, 1.3988, ..., -0.9968, 0.1395, 0.9952],\n",
-      " [ 0.9684, 1.7454, 1.3988, ..., 0.8486, 0.1395, 0.9952],\n",
-      " [ 0.9368, 1.7454, 1.3988, ..., -0.5103, 0.1395, 0.9952],\n",
-      " ...,\n",
-      " [ 0.0832, 1.7454, 1.3988, ..., 0.1988, 0.1395, 0.9952],\n",
-      " [ 0.0516, 1.7454, 1.3988, ..., 0.2742, 0.1395, 0.9952],\n",
-      " [ 0.0200, 1.7454, 1.3988, ..., -0.6857, 0.1395, 0.9952]],\n",
-      "\n",
-      " [[ 1.0000, 1.7426, 1.3988, ..., -0.9968, 0.9999, 0.9953],\n",
-      " [ 0.9684, 1.7426, 1.3988, ..., 0.8486, 0.9999, 0.9953],\n",
-      " [ 0.9368, 1.7426, 1.3988, ..., -0.5103, 0.9999, 0.9953],\n",
-      " ...,\n",
-      " [ 0.0832, 1.7426, 1.3988, ..., 0.1988, 0.9999, 0.9953],\n",
-      " [ 0.0516, 1.7426, 1.3988, ..., 0.2742, 0.9999, 0.9953],\n",
-      " [ 0.0200, 1.7426, 1.3988, ..., -0.6857, 0.9999, 0.9953]],\n",
-      "\n",
-      " [[ 1.0000, 1.7400, 1.3987, ..., -0.9968, 0.2253, 0.9881],\n",
-      " [ 0.9684, 1.7400, 1.3987, ..., 0.8486, 0.2253, 0.9881],\n",
-      " [ 0.9368, 1.7400, 1.3987, ..., -0.5103, 0.2253, 0.9881],\n",
-      " ...,\n",
-      " [ 0.0832, 1.7400, 1.3987, ..., 0.1988, 0.2253, 0.9881],\n",
-      " [ 0.0516, 1.7400, 1.3987, ..., 0.2742, 0.2253, 0.9881],\n",
-      " [ 0.0200, 1.7400, 1.3987, ..., -0.6857, 0.2253, 0.9881]],\n",
-      "\n",
-      " ...,\n",
-      "\n",
-      " [[ 1.0000, 1.4043, 1.7403, ..., -0.9968, -0.9210, 0.3760],\n",
-      " [ 0.9684, 1.4043, 1.7403, ..., 0.8486, -0.9210, 0.3760],\n",
-      " [ 0.9368, 1.4043, 1.7403, ..., -0.5103, -0.9210, 0.3760],\n",
-      " ...,\n",
-      " [ 0.0832, 1.4043, 1.7403, ..., 0.1988, -0.9210, 0.3760],\n",
-      " [ 0.0516, 1.4043, 1.7403, ..., 0.2742, -0.9210, 0.3760],\n",
-      " [ 0.0200, 1.4043, 1.7403, ..., -0.6857, -0.9210, 0.3760]],\n",
-      "\n",
-      " [[ 1.0000, 1.4016, 1.7403, ..., -0.9968, 0.2445, 0.3786],\n",
-      " [ 0.9684, 1.4016, 1.7403, ..., 0.8486, 0.2445, 0.3786],\n",
-      " [ 0.9368, 1.4016, 1.7403, ..., -0.5103, 0.2445, 0.3786],\n",
-      " ...,\n",
-      " [ 0.0832, 1.4016, 1.7403, ..., 0.1988, 0.2445, 0.3786],\n",
-      " [ 0.0516, 1.4016, 1.7403, ..., 0.2742, 0.2445, 0.3786],\n",
-      " [ 0.0200, 1.4016, 1.7403, ..., -0.6857, 0.2445, 0.3786]],\n",
-      "\n",
-      " [[ 1.0000, 1.3989, 1.7402, ..., -0.9968, 0.9995, 0.3247],\n",
-      " [ 0.9684, 1.3989, 1.7402, ..., 0.8486, 0.9995, 0.3247],\n",
-      " [ 0.9368, 1.3989, 1.7402, ..., -0.5103, 0.9995, 0.3247],\n",
-      " ...,\n",
-      " [ 0.0832, 1.3989, 1.7402, ..., 0.1988, 0.9995, 0.3247],\n",
-      " [ 0.0516, 1.3989, 1.7402, ..., 0.2742, 0.9995, 0.3247],\n",
-      " [ 0.0200, 1.3989, 1.7402, ..., -0.6857, 0.9995, 0.3247]]],\n",
-      " device='cuda:2')\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "test_view = view.Trans(\n",
     " torch.tensor([[0.0, 0.0, 0.0]], device=device.default()),\n",
......
@@ -2,24 +2,17 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Set CUDA:2 as current device.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import sys\n",
     "import os\n",
     "import torch\n",
     "import matplotlib.pyplot as plt\n",
     "\n",
-    "sys.path.append(os.path.abspath(sys.path[0] + '/../'))\n",
+    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
+    "sys.path.append(rootdir)\n",
    "torch.cuda.set_device(2)\n",
     "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
     "\n",
@@ -132,43 +125,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Change working directory to /home/dengnc/deep_view_syn/data/__0_user_study/us_mc_all_in_one\n",
-      "==== Config fovea ====\n",
-      "Net type: nmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 128, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 32, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from fovea@nmsl-rgb_e10_fc128x4_d1-50_s32.pth ...\n",
-      "==== Config periph ====\n",
-      "Net type: nnmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 64, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 16, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from periph@nnmsl-rgb_e10_fc64x4_d1-50_s16.pth ...\n",
-      "Dataset loaded.\n",
-      "views: [5, 5, 5, 5, 5]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "#os.chdir(sys.path[0] + '/../data/__0_user_study/us_gas_all_in_one')\n",
-    "os.chdir(sys.path[0] + '/../data/__0_user_study/us_mc_all_in_one')\n",
-    "#os.chdir(sys.path[0] + '/../data/bedroom_all_in_one')\n",
+    "#os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_gas_all_in_one'))\n",
+    "os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_mc_all_in_one'))\n",
+    "#os.chdir(os.path.join(rootdir, 'data/bedroom_all_in_one'))\n",
     "print('Change working directory to ', os.getcwd())\n",
     "torch.autograd.set_grad_enabled(False)\n",
     "\n",
@@ -192,19 +155,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "view_coord: [2, 2, 2, 2, 2]\n",
-      "shift: 3\n",
-      "shift: -3\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "centers = [\n",
     " # ==gas==\n",
......
@@ -2,17 +2,9 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Set CUDA:2 as current device.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import sys\n",
     "import os\n",
@@ -20,7 +12,8 @@
     "import matplotlib.pyplot as plt\n",
     "import torchvision.transforms.functional as trans_f\n",
     "\n",
-    "sys.path.append(os.path.abspath(sys.path[0] + '/../'))\n",
+    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
+    "sys.path.append(rootdir)\n",
     "torch.cuda.set_device(2)\n",
     "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
     "\n",
@@ -82,43 +75,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Change working directory to /home/dengnc/deep_view_syn/data/__0_user_study/us_gas_all_in_one\n",
-      "==== Config fovea ====\n",
-      "Net type: nmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 128, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 32, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from fovea@nmsl-rgb_e10_fc128x4_d1-50_s32.pth ...\n",
-      "==== Config periph ====\n",
-      "Net type: nnmsl\n",
-      "Encode dim: 10\n",
-      "Optimizer decay: 0\n",
-      "Normalize: False\n",
-      "Direction as input: False\n",
-      "Full-connected network parameters: {'nf': 64, 'n_layers': 4, 'skips': []}\n",
-      "Sample parameters {'spherical': True, 'depth_range': (1.0, 50.0), 'n_samples': 16, 'perturb_sample': False, 'lindisp': True, 'inverse_r': True}\n",
-      "==========================\n",
-      "Load net from periph@nnmsl-rgb_e10_fc64x4_d1-50_s16.pth ...\n",
-      "Dataset loaded.\n",
-      "views: [5, 5, 5, 5, 5]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "os.chdir(sys.path[0] + '/../data/__0_user_study/us_gas_all_in_one')\n",
-    "#os.chdir(sys.path[0] + '/../data/__0_user_study/us_mc_all_in_one')\n",
-    "#os.chdir(sys.path[0] + '/../data/bedroom_all_in_one')\n",
+    "os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_gas_all_in_one'))\n",
+    "#os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_mc_all_in_one'))\n",
+    "#os.chdir(os.path.join(rootdir, 'data/bedroom_all_in_one'))\n",
     "print('Change working directory to ', os.getcwd())\n",
     "torch.autograd.set_grad_enabled(False)\n",
     "\n",
@@ -294,13 +257,6 @@
     " img.save(\n",
     " right_images[key], 'output/mono_test/set%d_%s_r.png' % (set_id, key))\n"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
......
import sys
sys.path.append('/e/dengnc')
__package__ = "deep_view_syn"
import os
import torch
import torch.optim
......
import sys
import os
import json
import argparse
from typing import Mapping
sys.path.append(os.path.abspath(sys.path[0] + '/../'))
from utils import view
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', type=str, default='')
parser.add_argument('dataset', type=str)
args = parser.parse_args()
data_desc_path = args.dataset
data_desc_name = os.path.splitext(os.path.basename(data_desc_path))[0]
data_dir = os.path.dirname(data_desc_path) + '/'
with open(data_desc_path, 'r') as fp:
dataset_desc: Mapping = json.load(fp)
dataset_desc['cam_params'] = view.CameraParam.convert_camera_params(
    dataset_desc['cam_params'],
    (dataset_desc['view_res']['x'], dataset_desc['view_res']['y']))
dataset_desc['view_rots'] = [
view.euler_to_matrix([rot[1], rot[0], 0])
for rot in dataset_desc['view_rots']
] if len(dataset_desc['view_rots'][0]) == 2 else dataset_desc['view_rots']
if dataset_desc.get('gl_coord'):
del dataset_desc['gl_coord']
for i in range(len(dataset_desc['view_centers'])):
dataset_desc['view_centers'][i][2] *= -1
dataset_desc['view_rots'][i][2] *= -1
dataset_desc['view_rots'][i][5] *= -1
dataset_desc['view_rots'][i][6] *= -1
dataset_desc['view_rots'][i][7] *= -1
output_name = args.output if args.output else data_desc_name + '_cvt.json'
with open(os.path.join(data_dir, output_name), 'w') as fp:
json.dump(dataset_desc, fp, indent=4)
@@ -2,12 +2,13 @@ import sys
 import os
 import torch
 
-sys.path.append(os.path.abspath(sys.path[0] + '/../'))
+os.chdir('../')
+sys.path.append(os.getcwd())
 
 from utils import img
 from utils import misc
 
-data_dir = '/home/dengnc/deep_view_syn/data/__7_challenge/classroom_r360x80_t0.3'
+data_dir = 'data/__7_challenge/classroom_r360x80_t0.3'
 in_set = f'{data_dir}/train_depth'
 out_set = f'{data_dir}/train_depth_low'
......
@@ -8,7 +8,6 @@ import torch.nn.functional as nn_f
 from tensorboardX.writer import SummaryWriter
 
 sys.path.append(os.path.abspath(sys.path[0] + '/../'))
-__package__ = "deep_view_syn"
 
 # ===========================================================
 # Training settings
......
@@ -45,7 +45,7 @@ def torch2np(input: torch.Tensor) -> np.ndarray:
     :param input `Tensor(HW|[B]CHW|[B]HWC)`: 2D, 3D or 4D torch-image(s)
     :return `ndarray ([B]HWC)`: numpy-image(s) with channels transposed to the last dim
     """
-    img = misc.torch2np(input.squeeze())
+    img = misc.torch2np(input)
     if len(img.shape) == 2:  # 2D(HW): Single channel image
         return img
     batch_input = len(img.shape) == 4
@@ -88,7 +88,9 @@ def save(input: torch.Tensor, *paths: str):
     new_paths = []
     for path in paths:
         new_paths += [path] if isinstance(path, str) else list(path)
-    if (len(input.size()) != 4 and len(new_paths) != 1) or input.size(0) != len(new_paths):
+    if len(input.size()) < 4:
+        input = input[None]
+    if input.size(0) != len(new_paths):
         raise ValueError
     np_img = torch2np(input)
     if np_img.dtype.kind == 'f':
@@ -111,7 +113,10 @@ def plot(input: torch.Tensor, *, ax: plt.Axes = None):
     :param input `Tensor(HW|[B]CHW|[B]HWC)`: 2D, 3D or 4D torch-image(s)
     :param ax `plt.Axes`: (optional) specify the axes to plot image
     """
-    return plt.imshow(torch2np(input)) if ax is None else ax.imshow(torch2np(input))
+    im = torch2np(input)
+    if len(im.shape) == 4:
+        im = im[0]
+    return plt.imshow(im) if ax is None else ax.imshow(im)
 
 
 def save_video(frames: torch.Tensor, path: str, fps: int,
@@ -138,7 +143,7 @@ def save_video(frames: torch.Tensor, path: str, fps: int,
     misc.create_dir(temp_out_dir)
     os.chdir(temp_out_dir)
     save_seq(frames, 'out_%04d.png')
-    os.system(f'ffmpeg -y -r {fps:d} -i out_%04d.png -c:v libx264 -vf fps={fps:d} -pix_fmt yuv420p ../{file_name}')
+    os.system(f'ffmpeg -y -r {fps:d} -i out_%04d.png -c:v h264 -vf fps={fps:d} -pix_fmt yuv420p ../{file_name}')
     os.chdir(cwd)
     shutil.rmtree(os.path.join(dir, temp_out_dir))
......
@@ -2,6 +2,7 @@
 from typing import List, Mapping, Tuple, Union
 import torch
 import math
+import glm
 
 from . import misc
@@ -14,7 +15,7 @@ class CameraParam(object):
     def __init__(self, params: Mapping[str, Union[float, bool]],
                  res: Tuple[int, int], *, device=None) -> None:
         super().__init__()
-        params = self._convert_camera_params(params, res)
+        params = CameraParam.convert_camera_params(params, res)
         self.res = res
         self.f = torch.tensor([params['fx'], params['fy'], 1], device=device)
         self.c = torch.tensor([params['cx'], params['cy']], device=device)
@@ -101,7 +102,8 @@ class CameraParam(object):
         rays_d = trans.trans_vector(rays)
         return rays_o, rays_d
 
-    def _convert_camera_params(self, input_camera_params: Mapping[str, Union[float, bool]],
+    @staticmethod
+    def convert_camera_params(input_camera_params: Mapping[str, Union[float, bool]],
                               view_res: Tuple[int, int]) -> Mapping[str, Union[float, bool]]:
         """
         Check and convert camera parameters in config file to pixel-space
@@ -135,7 +137,7 @@ class CameraParam(object):
 
 
 class Trans(object):
-    def __init__(self, t: torch.Tensor, r: torch.Tensor) -> None:
+    def __init__(self, t: torch.Tensor, r: torch.Tensor):
         self.t = t
         self.r = r
         if len(self.t.size()) == 1:
@@ -230,3 +232,8 @@ def trans_vector(v: torch.Tensor, r: torch.Tensor, inverse=False) -> torch.Tensor:
     out = torch.matmul(v.flatten(0, -2), r).view(out_size)
     return out
 
+
+def euler_to_matrix(euler: Union[Tuple[float, float, float], List[float]]) -> List[float]:
+    q = glm.quat(glm.radians(glm.vec3(euler[0], euler[1], euler[2])))
+    vec_list = glm.transpose(glm.mat3_cast(q)).to_list()
+    return vec_list[0] + vec_list[1] + vec_list[2]