{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate fovea/periphery crop figures: for each scene, cut two square\n",
    "# patches out of four comparison renderings ('our', 'gt', 'nerf', 'fgt'),\n",
    "# save them individually and side by side, and save an RGBA overlay that\n",
    "# marks the crop rectangles on the full image.\n",
    "import sys\n",
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as nn_f  # required by nn_f.interpolate() below\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Make the project root importable and pin computation to GPU 0.\n",
    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
    "sys.path.append(rootdir)\n",
    "torch.cuda.set_device(0)\n",
    "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
    "torch.autograd.set_grad_enabled(False)  # inference only; no gradients needed\n",
    "\n",
    "from data.spherical_view_syn import *\n",
    "from configs.spherical_view_syn import SphericalViewSynConfig\n",
    "from utils import netio\n",
    "from utils import img\n",
    "from utils import device\n",
    "from utils.view import *\n",
    "from components.fnr import FoveatedNeuralRenderer\n",
    "\n",
    "datadir = f\"{rootdir}/data/__new/__demo/for_crop\"\n",
    "figs = ['our', 'gt', 'nerf', 'fgt']\n",
    "# Each scene maps to two crop specs [center_x, center_y, size] (pixels):\n",
    "# index 0 is the fovea crop, index 1 is the peripheral crop.\n",
    "crops = {\n",
    "    'classroom_0': [[720, 800, 128], [1097, 982, 256]],\n",
    "    'lobby_1': [[570, 1000, 100], [1049, 1049, 256]],\n",
    "    'stones_2': [[720, 800, 100], [680, 1317, 256]],\n",
    "    'barbershop_3': [[745, 810, 100], [1135, 627, 256]]\n",
    "}\n",
    "# RGBA frame colors: green for the fovea crop, yellow for the peripheral crop.\n",
    "colors = torch.tensor([[0, 1, 0, 1], [1, 1, 0, 1]], dtype=torch.float)\n",
    "border = 10  # width (px) of the highlight frame drawn around each crop\n",
    "\n",
    "for scene in crops:\n",
    "    # Load the four comparison renderings of this scene as one batch.\n",
    "    images = img.load([f\"{datadir}/origin/{scene}_{fig}.png\" for fig in figs])\n",
    "    crop = crops[scene]\n",
    "    # Cut the square crop regions out of every image ('...' keeps batch/channel dims).\n",
    "    fovea_patches = images[...,\n",
    "                           crop[0][1] - crop[0][2] // 2: crop[0][1] + crop[0][2] // 2,\n",
    "                           crop[0][0] - crop[0][2] // 2: crop[0][0] + crop[0][2] // 2]\n",
    "    periph_patches = images[...,\n",
    "                            crop[1][1] - crop[1][2] // 2: crop[1][1] + crop[1][2] // 2,\n",
    "                            crop[1][0] - crop[1][2] // 2: crop[1][0] + crop[1][2] // 2]\n",
    "    # Resize both patch stacks to a common 128x128 so they tile uniformly.\n",
    "    fovea_patches = nn_f.interpolate(fovea_patches, (128, 128))\n",
    "    periph_patches = nn_f.interpolate(periph_patches, (128, 128))\n",
    "    # Build an RGBA overlay with the two crop rectangles as hollow frames.\n",
    "    # NOTE(review): 1600x1440 (H x W) is assumed to match the source images — confirm.\n",
    "    overlay = torch.zeros(1, 4, 1600, 1440)\n",
    "    mask = torch.zeros(2, 1600, 1440, dtype=torch.bool)\n",
    "    for i in range(2):\n",
    "        # Mark the border-enlarged rectangle, then clear the interior,\n",
    "        # leaving only a 'border'-pixel-wide frame set to True.\n",
    "        mask[i,\n",
    "             crop[i][1] - crop[i][2] // 2 - border: crop[i][1] + crop[i][2] // 2 + border,\n",
    "             crop[i][0] - crop[i][2] // 2 - border: crop[i][0] + crop[i][2] // 2 + border] = True\n",
    "        mask[i,\n",
    "             crop[i][1] - crop[i][2] // 2: crop[i][1] + crop[i][2] // 2,\n",
    "             crop[i][0] - crop[i][2] // 2: crop[i][0] + crop[i][2] // 2] = False\n",
    "    overlay[:, :, mask[0]] = colors[0][..., None]\n",
    "    overlay[:, :, mask[1]] = colors[1][..., None]\n",
    "    # Preview: first rendering next to the overlay, then all eight patches.\n",
    "    plt.figure(figsize=(12, 6))\n",
    "    plt.subplot(1, 2, 1)\n",
    "    img.plot(images[0])\n",
    "    plt.subplot(1, 2, 2)\n",
    "    img.plot(overlay)\n",
    "    plt.figure(figsize=(12, 6))\n",
    "    for i in range(4):\n",
    "        plt.subplot(2, 4, i + 1)\n",
    "        img.plot(fovea_patches[i])\n",
    "    for i in range(4):\n",
    "        plt.subplot(2, 4, i + 5)\n",
    "        img.plot(periph_patches[i])\n",
    "    # Persist: per-method patches, fovea+periph strips, and the overlay.\n",
    "    img.save(fovea_patches, [f\"{datadir}/fovea/{scene}_{fig}.png\" for fig in figs])\n",
    "    img.save(periph_patches, [f\"{datadir}/periph/{scene}_{fig}.png\" for fig in figs])\n",
    "    img.save(torch.cat([fovea_patches, periph_patches], dim=-1),\n",
    "             [f\"{datadir}/patch/{scene}_{fig}.png\" for fig in figs])\n",
    "    img.save(overlay, f\"{datadir}/overlay/{scene}.png\")\n"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "82066b63b621a9e3d15e3b7c11ca76da6238eff3834294910d715044bd0561e5"
  },
  "kernelspec": {
   "display_name": "Python 3.8.5 64-bit ('base': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  },
  "orig_nbformat": 2
 },
 "nbformat": 4,
 "nbformat_minor": 2
}