import torch
import argparse
import os
import glob
import numpy as np
import torchvision.transforms as transforms
from torchvision.utils import save_image

from torchvision import datasets
from torch.utils.data import DataLoader 
from torch.autograd import Variable

import cv2
from gen_image import *
import json
from ssim import *
from perc_loss import * 
# param
BATCH_SIZE = 5
NUM_EPOCH = 5000

INTERLEAVE_RATE = 2

IM_H = 480
IM_W = 640

N = 9 # number of views in the input light field stack
M = 2 # number of display layers

DATA_FILE = "/home/yejiannan/Project/LightField/data/try"
DATA_JSON = "/home/yejiannan/Project/LightField/data/data.json"
DATA_VAL_JSON = "/home/yejiannan/Project/LightField/data/data_val.json"
OUTPUT_DIR = "/home/yejiannan/Project/LightField/output"

class lightFieldDataLoader(torch.utils.data.dataset.Dataset):
    def __init__(self, file_dir_path, file_json, transforms=None):
        self.file_dir_path = file_dir_path
        self.transforms = transforms
        # self.datum_list = glob.glob(os.path.join(file_dir_path,"*"))
        with open(file_json, encoding='utf-8') as file:
            self.dataset_desc = json.loads(file.read())

    def __len__(self):
        return len(self.dataset_desc["focaldepth"])

    def __getitem__(self, idx):
        lightfield_images, gt, fd = self.get_datum(idx)
        if self.transforms:
            lightfield_images = self.transforms(lightfield_images)
        return (lightfield_images, gt, fd)

    def get_datum(self, idx):
        lf_image_paths = os.path.join(DATA_FILE, self.dataset_desc["train"][idx])
        fd_gt_path = os.path.join(DATA_FILE, self.dataset_desc["gt"][idx])
        lf_images = []
        lf_image_big = cv2.imread(lf_image_paths, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.
        lf_image_big = cv2.cvtColor(lf_image_big, cv2.COLOR_BGR2RGB)
        for i in range(N):
            # crop the i-th view out of the 3x3 light field sheet
            lf_image = lf_image_big[i // 3 * IM_H:i // 3 * IM_H + IM_H, i % 3 * IM_W:i % 3 * IM_W + IM_W, 0:3]
            lf_images.append(lf_image)
        gt = cv2.imread(fd_gt_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.
        gt = cv2.cvtColor(gt, cv2.COLOR_BGR2RGB)
        fd = self.dataset_desc["focaldepth"][idx]
        return (np.asarray(lf_images), gt, fd)

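# Usage sketch (hypothetical helper, never called): documents what one dataset
# item looks like. Assumes the DATA_FILE / DATA_JSON paths above exist; each
# item is the stack of N = 9 RGB views cut from a 3x3 sheet, plus the ground
# truth image and its focal depth.
def _dataset_shape_example():
    dataset = lightFieldDataLoader(DATA_FILE, DATA_JSON)
    lf, gt, fd = dataset[0]
    # lf: (9, 480, 640, 3) float32 in [0, 1]; gt: (480, 640, 3); fd: a scalar
    print(lf.shape, gt.shape, fd)
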
OUT_CHANNELS_RB = 128
KERNEL_SIZE_RB = 3
KERNEL_SIZE = 3

class residual_block(torch.nn.Module):
    def __init__(self,delta_channel_dim):
        super(residual_block,self).__init__()
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(OUT_CHANNELS_RB + delta_channel_dim, OUT_CHANNELS_RB + delta_channel_dim, KERNEL_SIZE_RB, stride=1, padding=1),
            torch.nn.BatchNorm2d(OUT_CHANNELS_RB + delta_channel_dim),
            torch.nn.ELU()
        )
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(OUT_CHANNELS_RB + delta_channel_dim, OUT_CHANNELS_RB + delta_channel_dim, KERNEL_SIZE_RB, stride=1, padding=1),
            torch.nn.BatchNorm2d(OUT_CHANNELS_RB + delta_channel_dim),
            torch.nn.ELU()
        )

    def forward(self,input):
        output = self.layer1(input)
        output = self.layer2(output)
        output = input+output
        return output

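# Shape sketch (hypothetical helper, never called): a residual block maps
# (B, OUT_CHANNELS_RB + delta, H, W) to the same shape, since both conv layers
# preserve the channel count and the input is added back at the end.
def _residual_block_shape_example():
    block = residual_block(delta_channel_dim=1)
    x = torch.randn(2, OUT_CHANNELS_RB + 1, 16, 16)
    assert block(x).shape == x.shape
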
class deinterleave(torch.nn.Module):
    def __init__(self, block_size):
        super(deinterleave, self).__init__()
        self.block_size = block_size
        self.block_size_sq = block_size*block_size

    def forward(self, input):
        output = input.permute(0, 2, 3, 1)
        (batch_size, d_height, d_width, d_depth) = output.size()
        s_depth = int(d_depth / self.block_size_sq)
        s_width = int(d_width * self.block_size)
        s_height = int(d_height * self.block_size)
        t_1 = output.reshape(batch_size, d_height, d_width, self.block_size_sq, s_depth)
        spl = t_1.split(self.block_size, 3)
        stack = [t_t.reshape(batch_size, d_height, s_width, s_depth) for t_t in spl]
        output = torch.stack(stack,0).transpose(0,1).permute(0,2,1,3,4).reshape(batch_size, s_height, s_width, s_depth)
        output = output.permute(0, 3, 1, 2)
        return output

class interleave(torch.nn.Module):
    def __init__(self, block_size):
        super(interleave, self).__init__()
        self.block_size = block_size
        self.block_size_sq = block_size*block_size

    def forward(self, input):
        output = input.permute(0, 2, 3, 1)
        (batch_size, s_height, s_width, s_depth) = output.size()
        d_depth = s_depth * self.block_size_sq
        d_width = int(s_width / self.block_size)
        d_height = int(s_height / self.block_size)
        t_1 = output.split(self.block_size, 2)
        stack = [t_t.reshape(batch_size, d_height, d_depth) for t_t in t_1]
        output = torch.stack(stack, 1)
        output = output.permute(0, 2, 1, 3)
        output = output.permute(0, 3, 1, 2)
        return output

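# Round-trip sketch (hypothetical helper, never called): interleave is a
# space-to-depth (pixel-unshuffle) operation and deinterleave is its
# depth-to-space inverse, so composing them reproduces the input whenever H and
# W are divisible by the block size.
def _interleave_roundtrip_example():
    x = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).reshape(2, 3, 4, 4)
    y = interleave(2)(x) # (2, 12, 2, 2): 4x the channels, half the resolution
    assert torch.equal(deinterleave(2)(y), x)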

LAST_LAYER_CHANNELS = 6 * INTERLEAVE_RATE**2
FIRST_LAYER_CHANNELS = 27 * INTERLEAVE_RATE**2

class model(torch.nn.Module):
    def __init__(self):
        super(model, self).__init__()
        self.interleave = interleave(INTERLEAVE_RATE)

        self.first_layer = torch.nn.Sequential(
            torch.nn.Conv2d(FIRST_LAYER_CHANNELS, OUT_CHANNELS_RB, KERNEL_SIZE, stride=1, padding=1),
            torch.nn.BatchNorm2d(OUT_CHANNELS_RB),
            torch.nn.ELU()
        )
        
        self.residual_block1 = residual_block(0)
        self.residual_block2 = residual_block(1)
        self.residual_block3 = residual_block(1)

        self.output_layer = torch.nn.Sequential(
            torch.nn.Conv2d(OUT_CHANNELS_RB+1,LAST_LAYER_CHANNELS,KERNEL_SIZE,stride=1,padding=1),
            torch.nn.BatchNorm2d(LAST_LAYER_CHANNELS),
            torch.nn.Sigmoid()
        )
        self.deinterleave = deinterleave(INTERLEAVE_RATE)


    def forward(self, lightfield_images, focal_length):
        # lightfield_images: (batch_size, N * 3, H, W) -- RGB x 9 views stacked along channels, H = IM_H, W = IM_W
        input_to_net = self.interleave(lightfield_images)
        # print("after interleave:",input_to_net.shape)
        input_to_rb = self.first_layer(input_to_net)
        output = self.residual_block1(input_to_rb)
        # print("output1:",output.shape)
        
        # append the focal depth as an extra feature channel (its reciprocal, one value per sample)
        depth_layer = torch.ones((output.shape[0], 1, output.shape[2], output.shape[3]))
        for i in range(focal_length.shape[0]):
            depth_layer[i] = 1. / focal_length[i]
        depth_layer = var_or_cuda(depth_layer)
        output = torch.cat((output, depth_layer), dim=1)

        output = self.residual_block2(output)
        output = self.residual_block3(output)
        # output = output + input_to_net
        output = self.output_layer(output)
        output = self.deinterleave(output)
        return output

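# Forward-pass sketch (hypothetical helper, never called): runs dummy data
# through the untrained network to document the expected shapes. The 108
# first-layer channels come from 9 views x RGB x INTERLEAVE_RATE**2; the final
# output is the M = 2 display layers x RGB at full resolution.
def _model_shape_example():
    net = model()
    if torch.cuda.is_available():
        net = net.cuda() # var_or_cuda pins the depth channel to CUDA when available
    net.eval()
    lf = var_or_cuda(torch.rand(1, N * 3, IM_H, IM_W)) # (1, 27, 480, 640)
    fd = torch.tensor([2.0]) # one focal depth per sample
    with torch.no_grad():
        out = net(lf, fd)
    assert out.shape == (1, M * 3, IM_H, IM_W) # (1, 6, 480, 640)
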
class Conf(object):
    def __init__(self):
        self.pupil_size = 0.02 # 2cm
        self.retinal_res = torch.tensor([ 480, 640 ])
        self.layer_res = torch.tensor([ 480, 640 ])
        self.n_layers = 2
        self.d_layer = [ 1., 3. ] # distances of the two display layers
        self.h_layer = [ 1. * 480. / 640., 3. * 480. / 640. ] # layer heights, scaled with distance so both subtend the same field of view

#### Image Gen
conf = Conf()

v = torch.tensor([conf.h_layer[0] / conf.d_layer[0],
     conf.h_layer[0] / conf.d_layer[0] * conf.layer_res[1] / conf.layer_res[0]])

u = GenSamplesInPupil(conf, 5)

def GenRetinalFromLayersBatch(layers, conf, df, v, u):
    # layers: (batch, 6, H, W) -- the two RGB display layers stacked along channels
    # Phi: (batch, 480, 640, 2, 41, 2) -- integer lookup coordinates into each layer
    # df: (batch,) -- focal depth per sample
    H_r = conf.retinal_res[0]
    W_r = conf.retinal_res[1]
    D_r = conf.retinal_res.double()
    N = conf.n_layers
    M = u.size()[0] # number of pupil samples (41 here)
    BS = df.shape[0]
    Phi = torch.empty(BS, H_r, W_r, N, M, 2, dtype=torch.long)
    # print("Phi:",Phi.shape)

    p_rx, p_ry = torch.meshgrid(torch.arange(0, int(H_r)),
                                torch.arange(0, int(W_r)))
    p_r = torch.stack([p_rx, p_ry], 2).unsqueeze(2).expand(-1, -1, M, -1) # (480, 640, 41, 2) retinal pixel grid
    for bs in range(BS):
        for i in range(0, N):
            dpi = conf.h_layer[i] / float(conf.layer_res[0]) # pixel pitch of layer i, e.g. 1 / 480
            ci = conf.layer_res / 2 # layer center in pixels, [240, 320]
            di = conf.d_layer[i] # depth of layer i
            pi_r = di * v * (1. / D_r * (p_r + 0.5) - 0.5) / dpi # ray footprint on layer i, (480, 640, 41, 2)
            wi = (1 - di / df[bs]) / dpi # (1 - depth / focal depth) / dpi, e.g. df = 2.625, di = 1.75
            pi = torch.floor(pi_r + ci + wi * u)
            torch.clamp_(pi[:, :, :, 0], 0, conf.layer_res[0] - 1)
            torch.clamp_(pi[:, :, :, 1], 0, conf.layer_res[1] - 1)
            Phi[bs, :, :, i, :, :] = pi
    retinal = torch.ones(BS, 3, H_r, W_r)
    retinal = var_or_cuda(retinal)
    for bs in range(BS):
        for j in range(0, M):
            retinal_view = torch.ones(3, H_r, W_r)
            retinal_view = var_or_cuda(retinal_view)
            for i in range(0, N):
                retinal_view.mul_(layers[bs, (i * 3) : (i * 3 + 3), Phi[bs, :, :, i, j, 0], Phi[bs, :, :, i, j, 1]])
            retinal[bs,:,:,:].add_(retinal_view)
        retinal[bs,:,:,:].div_(M)
    return retinal
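
# Usage sketch (hypothetical helper, never called): renders the retinal image a
# viewer would see through the pupil from a random two-layer stack. Each retinal
# pixel is the product of the layer samples hit along a ray, averaged over the
# M pupil samples; expect it to be slow, since the loops above run per batch
# item and per pupil sample.
def _retinal_gen_example():
    layers = var_or_cuda(torch.rand(1, 6, int(conf.layer_res[0]), int(conf.layer_res[1])))
    df = torch.tensor([2.0]) # one focal depth per sample, between the two layers
    retinal = GenRetinalFromLayersBatch(layers, conf, df, v, u)
    # retinal: (1, 3, 480, 640)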
#### Image Gen End

def merge_two(near, far):
    df = conf.d_layer[0] + (conf.d_layer[1] - conf.d_layer[0]) / 2.
    # Phi = GenRetinal2LayerMappings(conf, df, v, u)
    # retinal = GenRetinalFromLayers(layers, Phi)
    return (near[:, 0:3, :, :] + far[:, 3:6, :, :]) / 2.0 # average layer 1 of the near output with layer 2 of the far output

def loss_two_images(generated, gt):
    l1_loss = torch.nn.L1Loss()
    return l1_loss(generated, gt)

weightVarScale = 0.25
bias_stddev = 0.01

def weight_init_normal(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        torch.nn.init.xavier_normal_(m.weight.data)
        torch.nn.init.normal_(m.bias.data,mean = 0.0, std=bias_stddev)
    elif classname.find("BatchNorm2d") != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)

def var_or_cuda(x):
    if torch.cuda.is_available():
        x = x.cuda(non_blocking=True)
    return x

def calImageGradients(images):
    # images: 4-D NCHW tensor; forward differences along height (dx) and width (dy)
    dx = images[:, :, 1:, :] - images[:, :, :-1, :]
    dy = images[:, :, :, 1:] - images[:, :, :, :-1]
    return dx, dy


perc_loss = VGGPerceptualLoss() 
perc_loss = perc_loss.to("cuda")

def loss_new(generated, gt):
    mse_loss = torch.nn.MSELoss()
    rmse_intensity = mse_loss(generated, gt)
    RENORM_SCALE = torch.tensor(0.9)
    RENORM_SCALE = var_or_cuda(RENORM_SCALE) # currently unused below
    psnr_intensity = torch.log10(rmse_intensity) # log-MSE term (PSNR up to sign and scale)
    ssim_intensity = ssim(generated, gt) # computed but not part of the total below
    labels_dx, labels_dy = calImageGradients(gt)
    preds_dx, preds_dy = calImageGradients(generated)
    rmse_grad_x, rmse_grad_y = mse_loss(labels_dx, preds_dx), mse_loss(labels_dy, preds_dy)
    psnr_grad_x, psnr_grad_y = torch.log10(rmse_grad_x), torch.log10(rmse_grad_y)
    p_loss = perc_loss(generated, gt)
    # log-MSE on intensities and gradients plus a perceptual term; the constant 10
    # only offsets the negative log terms
    total_loss = 10 + psnr_intensity + 0.5 * (psnr_grad_x + psnr_grad_y) + p_loss
    return total_loss

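# Sanity sketch (hypothetical helper, never called; assumes a CUDA device, since
# perc_loss above is pinned to "cuda"): evaluates the loss on random images.
def _loss_example():
    pred = var_or_cuda(torch.rand(1, 3, IM_H, IM_W))
    target = var_or_cuda(torch.rand(1, 3, IM_H, IM_W))
    print(loss_new(pred, target)) # scalar tensor; lower is better
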
def save_checkpoints(file_path, epoch_idx, model, model_solver):
    print('[INFO] Saving checkpoint to %s ...' % ( file_path))
    checkpoint = {
        'epoch_idx': epoch_idx,
        'model_state_dict': model.state_dict(),
        'model_solver_state_dict': model_solver.state_dict()
    }
    torch.save(checkpoint, file_path)

mode = "val"
if __name__ == "__main__":
    #test
    # train_dataset = lightFieldDataLoader(DATA_FILE,DATA_JSON)
    # print(train_dataset[0][0].shape)
    # cv2.imwrite("test_crop0.png",train_dataset[0][1]*255.)
    # save_image(output[0][0:3].data,os.path.join(OUTPUT_DIR,"o%d_%d.png"%(epoch,batch_idx)))
    #test end
    
    #train
    train_data_loader = torch.utils.data.DataLoader(dataset=lightFieldDataLoader(DATA_FILE,DATA_JSON),
                                                    batch_size=BATCH_SIZE,
                                                    num_workers=0,
                                                    pin_memory=True,
                                                    shuffle=True,
                                                    drop_last=False)
    print(len(train_data_loader))

    val_data_loader = torch.utils.data.DataLoader(dataset=lightFieldDataLoader(DATA_FILE,DATA_VAL_JSON),
                                                    batch_size=1,
                                                    num_workers=0,
                                                    pin_memory=True,
                                                    shuffle=False,
                                                    drop_last=False)

    print(len(val_data_loader))

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    lf_model = model()
    if torch.cuda.is_available():
        lf_model = torch.nn.DataParallel(lf_model).cuda()

    # val: restore a trained checkpoint and render the validation set
    checkpoint = torch.load(os.path.join(OUTPUT_DIR,"ckpt-epoch-3001.pth"))
    lf_model.load_state_dict(checkpoint["model_state_dict"])
    lf_model.eval()

    print("Eval::")
    for sample_idx, (image_set, gt, df) in enumerate(val_data_loader):
        print("sample_idx::")
        with torch.no_grad():
            #reshape for input
            image_set = image_set.permute(0,1,4,2,3) # N LF C H W
            image_set = image_set.reshape(image_set.shape[0],-1,image_set.shape[3],image_set.shape[4]) # N, LFxC, H, W
            image_set = var_or_cuda(image_set)
            # image_set.to(device)
            gt = gt.permute(0,3,1,2)
            gt = var_or_cuda(gt)
            # print("Epoch:",epoch,",Iter:",batch_idx,",Input shape:",image_set.shape, ",Input gt:",gt.shape)
            output = lf_model(image_set,df)
            print("output:",output.shape," df:",df)
            save_image(output[0][0:3].data,os.path.join(OUTPUT_DIR,"1113_interp_l1_%.3f.png"%(df[0].data)))
            save_image(output[0][3:6].data,os.path.join(OUTPUT_DIR,"1113_interp_l2_%.3f.png"%(df[0].data)))
            output = GenRetinalFromLayersBatch(output,conf,df,v,u)
            save_image(output[0][0:3].data,os.path.join(OUTPUT_DIR,"1113_interp_o%.3f.png"%(df[0].data)))
    exit()

    # train
    # print(lf_model)
    # exit()

    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # lf_model = model()
    # lf_model.apply(weight_init_normal)

    # if torch.cuda.is_available():
    #     lf_model = torch.nn.DataParallel(lf_model).cuda()
    # lf_model.train()
    # optimizer = torch.optim.Adam(lf_model.parameters(),lr=5e-2,betas=(0.9,0.999))
    
    # for epoch in range(NUM_EPOCH):
    #     for batch_idx, (image_set, gt, df) in enumerate(train_data_loader):
    #         #reshape for input
    #         image_set = image_set.permute(0,1,4,2,3) # N LF C H W
    #         image_set = image_set.reshape(image_set.shape[0],-1,image_set.shape[3],image_set.shape[4]) # N, LFxC, H, W
            
    #         image_set = var_or_cuda(image_set)
    #         # image_set.to(device)
    #         gt = gt.permute(0,3,1,2)
    #         gt = var_or_cuda(gt)
    #         # print("Epoch:",epoch,",Iter:",batch_idx,",Input shape:",image_set.shape, ",Input gt:",gt.shape)
    #         optimizer.zero_grad()
    #         output = lf_model(image_set,df)
    #         # print("output:",output.shape," df:",df.shape)
    #         output = GenRetinalFromLayersBatch(output,conf,df,v,u)
    #         loss = loss_new(output,gt)
    #         print("Epoch:",epoch,",Iter:",batch_idx,",loss:",loss)
    #         loss.backward()
    #         optimizer.step()
    #         if (epoch%100 == 0):
    #             for i in range(BATCH_SIZE):
    #                 save_image(output[i][0:3].data,os.path.join(OUTPUT_DIR,"cuda_lr_5e-2_mul_dip_newloss_debug_conf_o%d_%d.png"%(epoch,i)))
    #         if (epoch%1000 == 0):
    #             save_checkpoints(os.path.join(OUTPUT_DIR, 'ckpt-epoch-%04d.pth' % (epoch + 1)),
    #                             epoch,lf_model,optimizer)