{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import sys\n", "sys.path.append('/e/dengnc')\n", "\n", "from typing import List\n", "import torch\n", "from torch import nn\n", "import matplotlib.pyplot as plt\n", "from deep_view_syn.data.lf_syn import LightFieldSynDataset\n", "from deep_view_syn.my import util\n", "from deep_view_syn.trans_unet import LatentSpaceTransformer\n", "\n", "device = torch.device(\"cuda:2\")\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Test data loader" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "DATA_DIR = '../data/lf_syn_2020.12.23'\n", "TRAIN_DATA_DESC_FILE = DATA_DIR + '/train.json'\n", "\n", "train_dataset = LightFieldSynDataset(TRAIN_DATA_DESC_FILE)\n", "train_data_loader = torch.utils.data.DataLoader(\n", " dataset=train_dataset,\n", " batch_size=3,\n", " num_workers=8,\n", " pin_memory=True,\n", " shuffle=True,\n", " drop_last=False)\n", "print(len(train_data_loader))\n", "\n", "print(train_dataset.cam_params)\n", "print(train_dataset.sparse_view_positions)\n", "print(train_dataset.diopter_of_layers)\n", "plt.figure()\n", "util.PlotImageTensor(train_dataset.sparse_view_images[0])\n", "plt.figure()\n", "util.PlotImageTensor(train_dataset.sparse_view_depths[0] / 255 * 10)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Test disparity wrapper" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "\n", "\n", "\n", "transformer = LatentSpaceTransformer(train_dataset.sparse_view_images.size()[2],\n", " train_dataset.cam_params,\n", " train_dataset.diopter_of_layers,\n", " train_dataset.sparse_view_positions)\n", "novel_views = torch.stack([\n", " train_dataset.view_positions[13],\n", " train_dataset.view_positions[30],\n", " train_dataset.view_positions[57],\n", "], dim=0)\n", "trans_images = transformer(train_dataset.sparse_view_images.to(device),\n", " train_dataset.sparse_view_depths.to(device),\n", " novel_views)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "mask = (torch.sum(trans_images[0], 1) > 1e-5).to(dtype=torch.float)\n", "blended = torch.sum(trans_images[0], 0)\n", "weight = torch.sum(mask, 0)\n", "blended = blended / weight.unsqueeze(0)\n", "\n", "plt.figure(figsize=(6, 6))\n", "util.PlotImageTensor(train_dataset.view_images[13])\n", "plt.figure(figsize=(6, 6))\n", "util.PlotImageTensor(blended)\n", "plt.figure(figsize=(12, 6))\n", "plt.subplot(2, 4, 1)\n", "util.PlotImageTensor(train_dataset.sparse_view_images[0])\n", "plt.subplot(2, 4, 2)\n", "util.PlotImageTensor(train_dataset.sparse_view_images[1])\n", "plt.subplot(2, 4, 3)\n", "util.PlotImageTensor(train_dataset.sparse_view_images[2])\n", "plt.subplot(2, 4, 4)\n", "util.PlotImageTensor(train_dataset.sparse_view_images[3])\n", "\n", "plt.subplot(2, 4, 5)\n", "util.PlotImageTensor(trans_images[0, 0])\n", "plt.subplot(2, 4, 6)\n", "util.PlotImageTensor(trans_images[0, 1])\n", "plt.subplot(2, 4, 7)\n", "util.PlotImageTensor(trans_images[0, 2])\n", "plt.subplot(2, 4, 8)\n", "util.PlotImageTensor(trans_images[0, 3])\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3.7.6 64-bit ('pytorch': conda)", "metadata": { "interpreter": { "hash": "a00413fa0fb6b0da754bf9fddd63461fcd32e367fc56a5d25240eae72261060e" } }, "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": 
"text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.6" }, "orig_nbformat": 2 }, "nbformat": 4, "nbformat_minor": 2 }