{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Set CUDA:0 as current device.\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
    "sys.path.append(rootdir)\n",
    "torch.cuda.set_device(0)\n",
    "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
    "torch.autograd.set_grad_enabled(False)\n",
    "\n",
    "from data.spherical_view_syn import *\n",
    "from configs.spherical_view_syn import SphericalViewSynConfig\n",
    "from utils import netio\n",
    "from utils import img\n",
    "from utils import device\n",
    "from utils.view import *\n",
    "from components.fnr import FoveatedNeuralRenderer\n",
    "\n",
    "\n",
    "def load_net(path):\n",
    "    config = SphericalViewSynConfig()\n",
    "    config.from_id(os.path.splitext(path)[0])\n",
    "    config.SAMPLE_PARAMS['perturb_sample'] = False\n",
    "    net = config.create_net().to(device.default())\n",
    "    netio.load(path, net)\n",
    "    return net\n",
    "\n",
    "\n",
    "def find_file(prefix):\n",
    "    for path in os.listdir():\n",
    "        if path.startswith(prefix):\n",
    "            return path\n",
    "    return None\n",
    "\n",
    "\n",
    "def load_views(data_desc_file) -> Trans:\n",
    "    with open(data_desc_file, 'r', encoding='utf-8') as file:\n",
    "        data_desc = json.loads(file.read())\n",
    "        view_centers = torch.tensor(\n",
    "            data_desc['view_centers'], device=device.default()).view(-1, 3)\n",
    "        view_rots = torch.tensor(\n",
    "            data_desc['view_rots'], device=device.default()).view(-1, 3, 3)\n",
    "        return Trans(view_centers, view_rots)\n",
    "\n",
    "\n",
    "def plot_images(images):\n",
    "    plt.figure(figsize=(12, 4))\n",
    "    plt.subplot(131)\n",
    "    img.plot(images['layers_img'][0])\n",
    "    plt.subplot(132)\n",
    "    img.plot(images['layers_img'][1])\n",
    "    plt.subplot(133)\n",
    "    img.plot(images['layers_img'][2])\n",
    "    plt.figure(figsize=(12, 12))\n",
    "    img.plot(images['overlaid'])\n",
    "    plt.figure(figsize=(12, 12))\n",
    "    img.plot(images['blended_raw'])\n",
    "    plt.figure(figsize=(12, 12))\n",
    "    img.plot(images['blended'])\n",
    "\n",
    "\n",
    "scenes = {\n",
    "    'classroom': 'classroom_all',\n",
    "    'stones': 'stones_all',\n",
    "    'barbershop': 'barbershop_all',\n",
    "    'lobby': 'lobby_all'\n",
    "}\n",
    "\n",
    "fov_list = [20, 45, 110]\n",
    "res_list = [(256, 256), (256, 256), (256, 230)]\n",
    "res_full = (1600, 1440)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Change working directory to  /home/dengnc/dvs/data/__new/barbershop_all\n",
      "Load net from fovea@snerffast4-rgb_e6_fc512x4_d1.20-6.00_s64_~p.pth ...\n",
      "Load net from periph@snerffast4-rgb_e6_fc256x4_d1.20-6.00_s64_~p.pth ...\n"
     ]
    }
   ],
   "source": [
    "scene = 'barbershop'\n",
    "os.chdir(f'{rootdir}/data/__new/{scenes[scene]}')\n",
    "print('Change working directory to ', os.getcwd())\n",
    "\n",
    "fovea_net = load_net(find_file('fovea'))\n",
    "periph_net = load_net(find_file('periph'))\n",
    "renderer = FoveatedNeuralRenderer(fov_list, res_list, nn.ModuleList([fovea_net, periph_net, periph_net]),\n",
    "                                  res_full, device=device.default())"
   ]
  },
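  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Render a set of hand-picked views for the selected scene and either save the\n",
    "fovea/mid/periphery layers plus the blended and overlaid composites to\n",
    "`../__demo/mono/`, or plot them inline (see the `if` toggle inside the loop)."
   ]
  },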
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "params = {\n",
    "    'classroom': [\n",
    "        [0, 0, 0,   0, 0,   0, 0],\n",
    "        [0, 0, 0,   -53, 0,   0, 0],\n",
    "        [0, 0, 0,   20, -20,   0, 0]\n",
    "    ],\n",
    "    'stones': [\n",
    "        [0, 0, 0, 0, 10, -300, -50],\n",
    "        [0, 0, 0, 0, 10, 200, -50]\n",
    "    ],\n",
    "    'barbershop': [\n",
    "        [0, 0, 0,   0, 0,   0, 0],\n",
    "        [0, 0, 0, 20, 0, -300, 50],\n",
    "        [0, 0, 0, -140, -30, 150, -250],\n",
    "        [0, 0, 0, -60, -30, 75, -125],\n",
    "        [0, 0, 0,   -10, -5,   0, 0]\n",
    "    ],\n",
    "    'lobby': [\n",
    "        [0, 0, 0, 0, 0, 75, 0],\n",
    "        [0, 0, 0, 0, 0, 5, 150],\n",
    "        [0, 0, 0, -120, 0, 75, 50],\n",
    "    ]\n",
    "}\n",
    "\n",
    "for i, param in enumerate(params[scene]):\n",
    "    view = Trans(torch.tensor(param[:3], device=device.default()),\n",
    "                 torch.tensor(euler_to_matrix([-param[4], param[3], 0]), device=device.default()).view(3, 3))\n",
    "    images = renderer(view, param[-2:], using_mask=False, ret_raw=True)\n",
    "    images['overlaid'] = renderer.foveation.synthesis(images['layers_raw'], param[-2:], do_blend=False)\n",
    "    if True:\n",
    "        outputdir = '../__demo/mono/'\n",
    "        misc.create_dir(outputdir)\n",
    "        img.save(images['layers_img'][0], f'{outputdir}{scene}_{i}_fovea.png')\n",
    "        img.save(images['layers_img'][1], f'{outputdir}{scene}_{i}_mid.png')\n",
    "        img.save(images['layers_img'][2], f'{outputdir}{scene}_{i}_periph.png')\n",
    "        img.save(images['blended'], f'{outputdir}{scene}_{i}_blended.png')\n",
    "        img.save(images['overlaid'], f'{outputdir}{scene}_{i}_overlaid.png')\n",
    "        img.save(images['blended_raw'], f'{outputdir}{scene}_{i}_blended_raw.png')\n",
    "    else:\n",
    "        images = plot_images(images)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Load Dataset\n",
    "views = load_views('train.json')\n",
    "print('Dataset loaded.')\n",
    "print('views:', views.size())\n",
    "for view_idx in range(views.size()[0]):\n",
    "    center = (0, 0)\n",
    "    test_view = views.get(view_idx)\n",
    "    render_view(test_view, center)\n",
    "    '''\n",
    "    images = gen(center, test_view)\n",
    "    outputdir = '../__2_demo/layer_blend/'\n",
    "    misc.create_dir(outputdir)\n",
    "    for key in images:\n",
    "        img.save(images[key], outputdir + '%s_view%04d_%s.png' % (scene, view_idx, key))\n",
    "    '''\n",
    "    '''\n",
    "    images = gen(\n",
    "        center, test_view,\n",
    "        mono_trans=Trans(test_view.trans_point(\n",
    "            torch.tensor([0.03, 0, 0], device=device.default())\n",
    "        ), test_view.r))\n",
    "    outputdir = '../__2_demo/output_mono/ref_as_right_eye/'\n",
    "    misc.create_dir(outputdir)\n",
    "    for key in images:\n",
    "    key = 'blended'\n",
    "    img.save(images[key], outputdir + '%s_view%04d_%s.png' % (scene, view_idx, key))\n",
    "    '''\n",
    "    '''\n",
    "    left_images = gen(center,\n",
    "                      Trans(\n",
    "                          test_view.trans_point(\n",
    "                              torch.tensor([-0.03, 0, 0], device=device.default())\n",
    "                          ),\n",
    "                          test_view.r),\n",
    "                      mono_trans=test_view)\n",
    "    right_images = gen(center, Trans(\n",
    "        test_view.trans_point(\n",
    "            torch.tensor([0.03, 0, 0], device=device.default())\n",
    "        ), test_view.r), mono_trans=test_view)\n",
    "    outputdir = '../__2_demo/mono_periph/stereo/'\n",
    "    misc.create_dir(outputdir)\n",
    "    key = 'blended'\n",
    "    img.save(left_images[key], outputdir + '%s_view%04d_%s_l.png' % (scene, view_idx, key))\n",
    "    img.save(right_images[key], outputdir + '%s_view%04d_%s_r.png' % (scene, view_idx, key))\n",
    "    '''\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  },
  "metadata": {
   "interpreter": {
    "hash": "82066b63b621a9e3d15e3b7c11ca76da6238eff3834294910d715044bd0561e5"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}