import sys
sys.path.append('/e/dengnc')
__package__ = "deeplightfield"
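# The two lines above put the directory containing the "deeplightfield" package on
# sys.path and set __package__ so the relative imports below resolve when this file
# is run directly as a script.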

import os
import torch
import torch.optim
import torch.utils.data
import torchvision
from tensorboardX import SummaryWriter
from .loss.loss import PerceptionReconstructionLoss
from .my import netio
from .my import util
from .my import device
from .my.simple_perf import SimplePerf
from .data.lf_syn import LightFieldSynDataset
from .trans_unet import TransUnet


torch.cuda.set_device(2)
print("Set CUDA:%d as current device." % torch.cuda.current_device())
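# The CUDA device index above is hard-coded; change it to match the available GPUs.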

DATA_DIR = os.path.dirname(__file__) + '/data/lf_syn_2020.12.23'
TRAIN_DATA_DESC_FILE = DATA_DIR + '/train.json'
OUTPUT_DIR = DATA_DIR + '/output_bat2'
RUN_DIR = DATA_DIR + '/run_bat2'
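# OUTPUT_DIR receives rendered test views; RUN_DIR receives TensorBoard logs and checkpoints.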
BATCH_SIZE = 8
TEST_BATCH_SIZE = 10
NUM_EPOCH = 1000
MODE = "Silence"  # "Perf"
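# Set MODE to "Perf" to enable the SimplePerf timing checkpoints in the training loop.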
EPOCH_BEGIN = 0
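# Set EPOCH_BEGIN > 0 to resume training from the checkpoint saved at that epoch in RUN_DIR.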


def train():
    # 1. Initialize data loader
    print("Load dataset: " + TRAIN_DATA_DESC_FILE)
    train_dataset = LightFieldSynDataset(TRAIN_DATA_DESC_FILE)
    train_data_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=BATCH_SIZE,
        pin_memory=True,
        shuffle=True,
        drop_last=False)
    print("Batches per epoch: %d" % len(train_data_loader))

    # 2. Initialize components
    model = TransUnet(cam_params=train_dataset.cam_params,
                      view_images=train_dataset.sparse_view_images,
                      view_depths=train_dataset.sparse_view_depths,
                      view_positions=train_dataset.sparse_view_positions,
                      diopter_of_layers=train_dataset.diopter_of_layers).to(device.GetDevice())
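    # The network is conditioned on the sparse source views (images, depths and
    # positions) and synthesizes output views for the queried view positions.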
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
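    # Loss comparing the synthesized views against their ground-truth counterparts.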
    loss = PerceptionReconstructionLoss()

    if EPOCH_BEGIN > 0:
        netio.LoadNet('%s/model-epoch_%d.pth' % (RUN_DIR, EPOCH_BEGIN), model,
                      solver=optimizer)

    # 3. Train
    model.train()
    epoch = EPOCH_BEGIN
    iters = EPOCH_BEGIN * len(train_data_loader) * BATCH_SIZE
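    # "iters" counts training samples seen so far; it advances by BATCH_SIZE per batch.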

    util.CreateDirIfNeed(RUN_DIR)

    perf = SimplePerf(enable=(MODE == "Perf"), start=True)
    writer = SummaryWriter(RUN_DIR)

    print("Begin training...")
    for epoch in range(EPOCH_BEGIN, NUM_EPOCH):
        for _, view_images, _, view_positions in train_data_loader:

            view_images = view_images.to(device.GetDevice())

            perf.Checkpoint("Load")

            out_view_images = model(view_positions)

            perf.Checkpoint("Forward")

            optimizer.zero_grad()
            loss_value = loss(out_view_images, view_images)

            perf.Checkpoint("Compute loss")

            loss_value.backward()

            perf.Checkpoint("Backward")

            optimizer.step()

            perf.Checkpoint("Update")

            print("Epoch: ", epoch, ", Iter: ", iters,
                  ", Loss: ", loss_value.item())

            iters = iters + BATCH_SIZE

            # Write tensorboard logs: the loss every batch, an output-vs-GT image grid once per epoch.
            writer.add_scalar("loss", loss_value, iters)
            if iters % (len(train_data_loader) * BATCH_SIZE) == 0:
                output_vs_gt = torch.cat([out_view_images, view_images], dim=0)
                writer.add_image("Output_vs_gt", torchvision.utils.make_grid(
                    output_vs_gt, scale_each=True, normalize=False)
                    .cpu().detach().numpy(), iters)

        # Save checkpoint
        if ((epoch + 1) % 50 == 0):
            netio.SaveNet('%s/model-epoch_%d.pth' % (RUN_DIR, epoch + 1), model,
                          solver=optimizer)

    print("Train finished")


def test(net_file: str):
    # 1. Load train dataset
    print("Load dataset: " + TRAIN_DATA_DESC_FILE)
    train_dataset = LightFieldSynDataset(TRAIN_DATA_DESC_FILE)
    train_data_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=TEST_BATCH_SIZE,
        pin_memory=True,
        shuffle=False,
        drop_last=False)

    # 2. Load trained model
    model = TransUnet(cam_params=train_dataset.cam_params,
                      view_images=train_dataset.sparse_view_images,
                      view_depths=train_dataset.sparse_view_depths,
                      view_positions=train_dataset.sparse_view_positions,
                      diopter_of_layers=train_dataset.diopter_of_layers).to(device.GetDevice())
    netio.LoadNet(net_file, model)

    # 3. Test on train dataset
    print("Begin test on train dataset...")
    util.CreateDirIfNeed(OUTPUT_DIR)
    model.eval()
    with torch.no_grad():
        for view_idxs, view_images, _, view_positions in train_data_loader:
            out_view_images = model(view_positions)
            util.WriteImageTensor(
                view_images,
                ['%s/gt_view%02d.png' % (OUTPUT_DIR, i) for i in view_idxs])
            util.WriteImageTensor(
                out_view_images,
                ['%s/out_view%02d.png' % (OUTPUT_DIR, i) for i in view_idxs])


if __name__ == "__main__":
    train()
    #test(RUN_DIR + '/model-epoch_1000.pth')