{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Set CUDA:2 as current device.\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "import os\n",
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "rootdir = os.path.abspath(sys.path[0] + '/../')\n",
    "sys.path.append(rootdir)\n",
    "torch.cuda.set_device(2)\n",
    "print(\"Set CUDA:%d as current device.\" % torch.cuda.current_device())\n",
    "\n",
    "from ..data.spherical_view_syn import *\n",
    "from ..configs.spherical_view_syn import SphericalViewSynConfig\n",
    "from utils import netio\n",
    "from utils import img\n",
    "from utils import device\n",
    "from utils import view\n",
    "from components.gen_final import GenFinal\n",
    "from utils.perf import Perf\n",
    "\n",
    "\n",
    "def load_net(path):\n",
    "    config = SphericalViewSynConfig()\n",
    "    config.from_id(path[:-4])\n",
    "    config.SAMPLE_PARAMS['perturb_sample'] = False\n",
    "    config.print()\n",
    "    net = config.create_net().to(device.default())\n",
    "    netio.load(path, net)\n",
    "    return net\n",
    "\n",
    "\n",
    "def find_file(prefix):\n",
    "    for path in os.listdir():\n",
    "        if path.startswith(prefix):\n",
    "            return path\n",
    "    return None\n",
    "\n",
    "\n",
    "def load_views(data_desc_file) -> view.Trans:\n",
    "    with open(data_desc_file, 'r', encoding='utf-8') as file:\n",
    "        data_desc = json.loads(file.read())\n",
    "        view_centers = torch.tensor(\n",
    "            data_desc['view_centers'], device=device.default()).view(-1, 3)\n",
    "        view_rots = torch.tensor(\n",
    "            data_desc['view_rots'], device=device.default()).view(-1, 3, 3)\n",
    "        return view.Trans(view_centers, view_rots)\n",
    "\n",
    "\n",
    "def plot_figures(images, center):\n",
    "    plt.figure(figsize=(8, 4))\n",
    "    plt.subplot(121)\n",
    "    img.plot(images['fovea_raw'])\n",
    "    plt.subplot(122)\n",
    "    img.plot(images['fovea'])\n",
    "\n",
    "    plt.figure(figsize=(8, 4))\n",
    "    plt.subplot(121)\n",
    "    img.plot(images['mid_raw'])\n",
    "    plt.subplot(122)\n",
    "    img.plot(images['mid'])\n",
    "\n",
    "    plt.figure(figsize=(8, 4))\n",
    "    plt.subplot(121)\n",
    "    img.plot(images['periph_raw'])\n",
    "    plt.subplot(122)\n",
    "    img.plot(images['periph'])\n",
    "\n",
    "    # Plot Blended\n",
    "    plt.figure(figsize=(12, 6))\n",
    "    plt.subplot(121)\n",
    "    img.plot(images['blended_raw'])\n",
    "    plt.subplot(122)\n",
    "    img.plot(images['blended'])\n",
    "    plt.plot([(res_full[1] - 1) / 2 + center[0] - 5, (res_full[1] - 1) / 2 + center[0] + 5],\n",
    "                [(res_full[0] - 1) / 2 + center[1],\n",
    "                (res_full[0] - 1) / 2 + center[1]],\n",
    "                color=[0, 1, 0])\n",
    "    plt.plot([(res_full[1] - 1) / 2 + center[0], (res_full[1] - 1) / 2 + center[0]],\n",
    "                [(res_full[0] - 1) / 2 + center[1] - 5,\n",
    "                (res_full[0] - 1) / 2 + center[1] + 5],\n",
    "                color=[0, 1, 0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_gas_all_in_one'))\n",
    "#os.chdir(os.path.join(rootdir, 'data/__0_user_study/us_mc_all_in_one'))\n",
    "#os.chdir(os.path.join(rootdir, 'data/__0_user_study/lobby_all_in_one'))\n",
    "print('Change working directory to ', os.getcwd())\n",
    "torch.autograd.set_grad_enabled(False)\n",
    "\n",
    "fovea_net = load_net(find_file('fovea'))\n",
    "periph_net = load_net(find_file('periph'))\n",
    "\n",
    "# Load Dataset\n",
    "views = load_views('nerf_views.json')\n",
    "print('Dataset loaded.')\n",
    "\n",
    "print('views:', views.size())\n",
    "#print('ref views:', ref_dataset.samples)\n",
    "\n",
    "fov_list = [20, 45, 110]\n",
    "res_list = [(128, 128), (256, 256), (256, 230)]  # (192,256)]\n",
    "res_full = (1600, 1440)\n",
    "gen = GenFinal(fov_list, res_list, res_full, fovea_net, periph_net,\n",
    "               device=device.default())\n"
   ]
  },
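  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The next cell builds `test_view` as an identity pose at the origin. As a hedged sketch (the yaw/pitch convention below is an assumption, not taken from this repo), a non-identity pose for `view.Trans` can be composed from plain rotation matrices in the same way:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "\n",
    "\n",
    "def pose_from_yaw_pitch(center, yaw, pitch):\n",
    "    \"\"\"Sketch: build a (1, 3) center and (1, 3, 3) rotation for view.Trans.\n",
    "\n",
    "    Assumes yaw about +Y followed by pitch about +X, both in radians;\n",
    "    the repo's own camera convention may differ.\n",
    "    \"\"\"\n",
    "    cy, sy = math.cos(yaw), math.sin(yaw)\n",
    "    cp, sp = math.cos(pitch), math.sin(pitch)\n",
    "    rot_y = torch.tensor([[cy, 0.0, sy], [0.0, 1.0, 0.0], [-sy, 0.0, cy]])\n",
    "    rot_x = torch.tensor([[1.0, 0.0, 0.0], [0.0, cp, -sp], [0.0, sp, cp]])\n",
    "    rot = (rot_y @ rot_x).unsqueeze(0).to(device.default())\n",
    "    centers = torch.tensor([center], device=device.default())\n",
    "    return view.Trans(centers, rot)\n",
    "\n",
    "\n",
    "# Example: a pose 0.1 units along +X, turned 10 degrees to the left\n",
    "# example_view = pose_from_yaw_pitch([0.1, 0.0, 0.0], math.radians(10), 0.0)"
   ]
  },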
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_view = view.Trans(\n",
    "    torch.tensor([[0.0, 0.0, 0.0]], device=device.default()),\n",
    "    torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device.default())\n",
    ")\n",
    "perf = Perf(True, True)\n",
    "rays_o, rays_d = gen.layer_cams[0].get_global_rays(test_view, True)\n",
    "perf.checkpoint(\"GetRays\")\n",
    "rays_o = rays_o.view(-1, 3)\n",
    "rays_d = rays_d.view(-1, 3)\n",
    "coords, pts, depths = fovea_net.sampler(rays_o, rays_d)\n",
    "perf.checkpoint(\"Sample\")\n",
    "encoded = fovea_net.input_encoder(coords)\n",
    "perf.checkpoint(\"Encode\")\n",
    "print(\"Rays:\", rays_d)\n",
    "print(\"Spherical coords:\", coords)\n",
    "print(\"Depths:\", depths)\n",
    "print(\"Encoded:\", encoded)\n",
    "#plot_figures(images, center)\n",
    "\n",
    "#misc.create_dir('output/teasers')\n",
    "#for key in images:\n",
    "#    img.save(\n",
    "#        images[key], 'output/teasers/view%04d_%s.png' % (view_idx, key))\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.9 64-bit ('pytorch': conda)",
   "name": "python379jvsc74a57bd0660ca2a75467d3af74a68fcc6f40bc78ab96b99ff17d2f100b5ca821fbb183f2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9"
  },
  "orig_nbformat": 2
 },
 "nbformat": 4,
 "nbformat_minor": 2
}