{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
10
11
12
13
14
15
16
17
    "import sys\n",
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
    "sys.path.append(rootdir)\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
18
19
    "\n",
    "torch.cuda.set_device(3)\n",
20
21
22
    "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
    "torch.autograd.set_grad_enabled(False)\n",
    "\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
23
24
25
    "import model\n",
    "from data import Dataset\n",
    "from utils import netio, img, device\n",
26
    "from utils.view import *\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
27
    "from utils.type import PathLike\n",
28
    "from components.fnr import FoveatedNeuralRenderer\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
29
    "from components.render import render\n",
30
31
    "\n",
    "\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
32
33
34
35
36
    "def load_model(model_path: PathLike):\n",
    "    return model.deserialize(netio.load_checkpoint(model_path)[0],\n",
    "                             raymarching_early_stop_tolerance=0.01,\n",
    "                             raymarching_chunk_size_or_sections=None,\n",
    "                             perturb_sample=False).eval().to(device.default())\n",
37
38
39
40
41
42
43
44
45
    "\n",
    "\n",
    "def find_file(prefix):\n",
    "    for path in os.listdir():\n",
    "        if path.startswith(prefix):\n",
    "            return path\n",
    "    return None\n",
    "\n",
    "\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
46
47
48
49
50
51
52
53
54
55
    "def create_renderer(*nets, fov_scale=1.):\n",
    "    fov_list = [20, 45, 110]\n",
    "    for i in range(len(fov_list)):\n",
    "        fov_list[i] = length2fov(fov2length(fov_list[i]) * fov_scale)\n",
    "    res_list = [(256, 256), (256, 256), (256, 230)]\n",
    "    res_full = (1600, 1440)\n",
    "    return FoveatedNeuralRenderer(fov_list, res_list, nn.ModuleList(nets), res_full,\n",
    "                                  device=device.default())\n",
    "\n",
    "\n",
56
57
58
59
60
61
62
63
    "def plot_images(images):\n",
    "    plt.figure(figsize=(12, 4))\n",
    "    plt.subplot(131)\n",
    "    img.plot(images['layers_img'][0])\n",
    "    plt.subplot(132)\n",
    "    img.plot(images['layers_img'][1])\n",
    "    plt.subplot(133)\n",
    "    img.plot(images['layers_img'][2])\n",
Nianchen Deng's avatar
Nianchen Deng committed
64
    "    #plt.figure(figsize=(12, 12))\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
65
    "    # img.plot(images['overlaid'])\n",
Nianchen Deng's avatar
Nianchen Deng committed
66
    "    #plt.figure(figsize=(12, 12))\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
67
    "    # img.plot(images['blended_raw'])\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
68
    "    plt.figure(figsize=(12, 12))\n",
69
70
71
    "    img.plot(images['blended'])\n",
    "\n",
    "\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
72
73
74
75
76
77
78
79
80
81
82
83
84
85
    "def save_images(images, scene, i):\n",
    "    outputdir = '../__demo/mono/'\n",
    "    os.makedirs(outputdir, exist_ok=True)\n",
    "    for layer in range(len(images[\"layers_img\"])):\n",
    "        img.save(images['layers_img'][layer], f'{outputdir}{scene}_{i:04d}({layer}).png')\n",
    "    img.save(images['blended'], f'{outputdir}{scene}_{i:04d}.png')\n",
    "    if \"overlaid\" in images:\n",
    "        img.save(images['overlaid'], f'{outputdir}{scene}_{i:04d}_overlaid.png')\n",
    "    if \"blended_raw\" in images:\n",
    "        img.save(images['blended_raw'], f'{outputdir}{scene}_{i:04d}_noCE.png')\n",
    "    if \"nerf\" in images:\n",
    "        img.save(images['nerf'], f'{outputdir}{scene}_{i:04d}_nerf.png')\n",
    "\n",
    "\n",
86
    "scenes = {\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
87
88
89
90
91
    "    'classroom': '__new/classroom_all',\n",
    "    'stones': '__new/stones_all',\n",
    "    'barbershop': '__new/barbershop_all',\n",
    "    'lobby': '__new/lobby_all',\n",
    "    \"bedroom2\": \"__captured/bedroom2\"\n",
92
93
    "}\n",
    "\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
94
95
96
    "\n",
    "scene = \"bedroom2\"\n",
    "os.chdir(f'{rootdir}/data/{scenes[scene]}')\n",
97
98
    "print('Change working directory to ', os.getcwd())\n",
    "\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
99
100
101
102
    "fovea_net = load_model(find_file('fovea'))\n",
    "periph_net = load_model(find_file('periph'))\n",
    "nerf_net = load_model(find_file(\"nerf\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "params = {\n",
    "    'classroom': [\n",
Nianchen Deng's avatar
Nianchen Deng committed
112
113
114
    "        #[0, 0, 0,   -53, 0,   0, 0],\n",
    "        \n",
    "        #For Eval\n",
115
    "        [0, 0, 0,   0, 0,   0, 0],\n",
Nianchen Deng's avatar
Nianchen Deng committed
116
117
118
119
120
121
122
123
124
125
126
    "        [0, 0, 0,   20, -20,   0, 0],\n",
    "        [-0.03, 0, 0, 0, 0, 0, -83],\n",
    "        [0.03, 0, 0, 0, 0, 0, -83],\n",
    "        [0.3, 0, 0.3, 0, 0], # For panorama (Trans)\n",
    "        [-0.3, -0.3, -0.3, 0, 0], # For panorama (Trans)\n",
    "        [0, -0.3, 0.3, 0, 10, 0, 0], # For panorama (V-D)\n",
    "        [0, 0.3, 0.3, 0, 10, 0, 0], # For panorama (V-D)\n",
    "        [0, 0.3, 0.3, 0, 10, 160, 350], # For panorama (New)\n",
    "        \n",
    "        # For fig latency-quality\n",
    "        #[0, 0, 0,   10, -13,   0, 0], \n",
127
128
    "    ],\n",
    "    'stones': [\n",
Nianchen Deng's avatar
Nianchen Deng committed
129
130
131
132
133
134
    "        #[0, 0, 0, 0, 10, -300, -50],\n",
    "        #[0, 0, 0, 0, 10, 200, -50],\n",
    "        #For Eval\n",
    "        [-0.5, -0.5, -0.5, -25, 0, 50, -230],\n",
    "        [-0.5, -0.5, -0.5, 0, 0, 280, -220],\n",
    "        [-0.5, 0, 0.0, -30, 5, 0, 0],\n",
135
136
    "    ],\n",
    "    'barbershop': [\n",
Nianchen Deng's avatar
Nianchen Deng committed
137
138
139
140
141
142
143
144
145
146
147
148
149
150
    "        #[0, 0, 0,   0, 0,   0, 0],\n",
    "        #[0, 0, 0, 20, 0, -300, 50], #For fig rendering-system\n",
    "        #[0, 0, 0, -140, -30, 150, -250],\n",
    "        #[0, 0, 0, -60, -30, 75, -125],\n",
    "        #For Teaser & Eval\n",
    "        [0, 0, 0,   20, 10,   0, 0],\n",
    "        [0, 0, 0,   -20, -10,   0, 0],\n",
    "        [0.15, 0, 0.15,   -13, -5,   0, 0],\n",
    "        [-0.15, -0.15, 0, 12, 12, 0, 0],\n",
    "        [-0.15, 0, 0.15, -35, 2, 0, 0],\n",
    "        [0, 0.15, 0.15, -13, 10, 0, 0],\n",
    "        [0.15, 0.15, 0, 43, 2, 0, 0],\n",
    "        [-0.15, 0.15, 0.15, -53, -21, 0, 0],\n",
    "        [-0.15, 0.15, 0.15, -53, -21, 200, -200]\n",
151
152
    "    ],\n",
    "    'lobby': [\n",
Nianchen Deng's avatar
Nianchen Deng committed
153
154
155
156
157
158
159
160
161
    "        #[0, 0, 0, 0, 0, 75, 0],\n",
    "        #[0, 0, 0, 0, 0, 5, 150],\n",
    "        #[0.5, 0, 0.5, 29, -12, 0, 0],\n",
    "        #For Eval\n",
    "        [-0.5, -0.5, -0.5, -25, 0, -150, 0],\n",
    "        [-0.5, -0.5, -0.5, 25, 25, -150, 200],\n",
    "        [-0.03, 0, 0, 0, 0, 75, -20],\n",
    "        [0.03, 0, 0, 0, 0, 71, -20]\n",
    "        #[0, 0, 0, -120, 0, 75, 50],\n",
162
163
164
165
166
    "    ]\n",
    "}\n",
    "\n",
    "for i, param in enumerate(params[scene]):\n",
    "    view = Trans(torch.tensor(param[:3], device=device.default()),\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
167
    "                 torch.tensor(euler_to_matrix(-param[4], param[3], 0), device=device.default()).view(3, 3))\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
168
169
170
    "    images = renderer(view, param[-2:], using_mask=False, ret_raw=True)\n",
    "    images['overlaid'] = renderer.foveation.synthesis(images['layers_raw'], param[-2:], do_blend=False)\n",
    "    if True:\n",
171
    "        outputdir = '../__demo/mono/'\n",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
172
    "        os.makedirs(outputdir, exist_ok=True)\n",
173
174
175
176
    "        img.save(images['layers_img'][0], f'{outputdir}{scene}_{i}_fovea.png')\n",
    "        img.save(images['layers_img'][1], f'{outputdir}{scene}_{i}_mid.png')\n",
    "        img.save(images['layers_img'][2], f'{outputdir}{scene}_{i}_periph.png')\n",
    "        img.save(images['blended'], f'{outputdir}{scene}_{i}_blended.png')\n",
Nianchen Deng's avatar
Nianchen Deng committed
177
178
    "        #img.save(images['overlaid'], f'{outputdir}{scene}_{i}_overlaid.png')\n",
    "        #img.save(images['blended_raw'], f'{outputdir}{scene}_{i}.png')\n",
179
180
    "    else:\n",
    "        images = plot_images(images)\n"
Nianchen Deng's avatar
sync    
Nianchen Deng committed
181
   ]
182
183
184
  },
  {
   "cell_type": "code",
Nianchen Deng's avatar
sync    
Nianchen Deng committed
185
186
187
   "execution_count": null,
   "metadata": {},
   "outputs": [],
188
   "source": [
Nianchen Deng's avatar
sync    
Nianchen Deng committed
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
    "def load_views(data_desc_file) -> tuple[list[int], Trans]:\n",
    "    dataset = Dataset(data_desc_file)\n",
    "    return dataset.indices.tolist(),\\\n",
    "        Trans(dataset.centers, dataset.rots).to(device.default())\n",
    "\n",
    "\n",
    "demos = [ # view_idx, center_x, center_y, fov_scale\n",
    "    [220, 30, 25, 0.7],\n",
    "    [235, 0, 130, 0.7],\n",
    "    [239, 70, 140, 0.7],\n",
    "    [841, -100, 160, 0.7]\n",
    "]\n",
    "indices, views = load_views('images.json')\n",
    "for demo_idx in [0]:\n",
    "    view_idx = demos[demo_idx][0]\n",
    "    i = indices.index(view_idx)\n",
    "    center = tuple(demos[demo_idx][1:3])\n",
    "    renderer = create_renderer(fovea_net, periph_net, periph_net, fov_scale=demos[demo_idx][3])\n",
    "    images = renderer(views.get(i), center, using_mask=False)\n",
    "    #nerf_fovea = render(nerf_net, renderer.cam, views.get(i), None, batch_size=16384)[\"color\"]\n",
    "    #images[\"nerf\"] = nerf_fovea\n",
    "    plot_images(images)\n",
    "    #save_images(images, scene, view_idx)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.10.0 ('dvs')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.0"
  },
  "metadata": {
   "interpreter": {
    "hash": "82066b63b621a9e3d15e3b7c11ca76da6238eff3834294910d715044bd0561e5"
   }
  },
  "vscode": {
   "interpreter": {
    "hash": "4469b029896260c1221afa6e0e6159922aafd2738570e75b7bc15e28db242604"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}