From 7e0ade21aac998ce15d432b21c968f1c6ca58689 Mon Sep 17 00:00:00 2001
From: Nianchen Deng <dengnianchen@sjtu.edu.cn>
Date: Wed, 28 Apr 2021 10:12:28 +0800
Subject: [PATCH] Add dataset generation and splitting scripts

---
 data/blender_gen/gen_fovea.py  | 101 +++++++++++++++++++++++++++++++++
 data/blender_gen/gen_periph.py | 101 +++++++++++++++++++++++++++++++++
 data/calc_range.py             |  32 +++++++++++
 data/gen_seq.py                |  97 +++++++++++++++++++++++++++++++
 data/gen_subset.py             | 100 ++++++++++++++++++++++++++++++++
 data/split_dataset.py          |  86 ++++++++++++++++++++++++++++
 6 files changed, 517 insertions(+)
 create mode 100644 data/blender_gen/gen_fovea.py
 create mode 100644 data/blender_gen/gen_periph.py
 create mode 100644 data/calc_range.py
 create mode 100644 data/gen_seq.py
 create mode 100644 data/gen_subset.py
 create mode 100644 data/split_dataset.py

diff --git a/data/blender_gen/gen_fovea.py b/data/blender_gen/gen_fovea.py
new file mode 100644
index 0000000..6d002fb
--- /dev/null
+++ b/data/blender_gen/gen_fovea.py
@@ -0,0 +1,101 @@
+import bpy
+import math
+import json
+import os
+import numpy as np
+from itertools import product
+
+
+scene = bpy.context.scene
+cam_obj = scene.camera
+cam = cam_obj.data
+scene.cycles.device = 'GPU'
+
+dataset_name = 'train'
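+# Sampling box centered at the origin: tbox is the translation extent per axis,
+# rbox the rotation extent (rx, ry) in degrees.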
+tbox = [0.6, 0.6, 0.6]
+rbox = [320, 40]
+
+dataset_desc = {
+    'view_file_pattern': '%s/view_%%04d.png' % dataset_name,
+    "gl_coord": True,
+    'view_res': {
+        'x': 512,
+        'y': 512
+    },
+    'cam_params': {
+        'fov': 40.0,
+        'cx': 0.5,
+        'cy': 0.5,
+        'normalized': True
+    },
+    'range': {
+        'min': [-tbox[0] / 2, -tbox[1] / 2, -tbox[2] / 2, -rbox[0] / 2, -rbox[1] / 2],
+        'max': [tbox[0] / 2, tbox[1] / 2, tbox[2] / 2, rbox[0] / 2, rbox[1] / 2]
+    },
+    'samples': [5, 5, 5, 9, 2],
+    #'samples': [2000],
+    'view_centers': [],
+    'view_rots': []
+}
+data_desc_file = f'output/{dataset_name}.json'
+
+if not os.path.exists('output'):
+    os.mkdir('output')
+
+if os.path.exists(data_desc_file):
+    with open(data_desc_file, 'r') as fp:
+        dataset_desc.update(json.load(fp))
+with open(data_desc_file, 'w') as fp:
+    json.dump(dataset_desc, fp, indent=4)
+
+# Output resolution
+scene.render.resolution_x = dataset_desc['view_res']['x']
+scene.render.resolution_y = dataset_desc['view_res']['y']
+
+# Field of view
+cam.lens_unit = 'FOV'
+cam.angle = math.radians(dataset_desc['cam_params']['fov'])
+cam.dof.use_dof = False
+
+
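+# Render view i at the given pose; unless render_only, also record the pose in the
+# descriptor and rewrite the JSON so an interrupted run can be resumed.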
+def add_sample(i, x, y, z, rx, ry, render_only=False):
+    cam_obj.location = [x, y, z]
+    cam_obj.rotation_euler = [math.radians(ry), math.radians(rx), 0]
+    scene.render.filepath = 'output/' + dataset_desc['view_file_pattern'] % i
+    bpy.ops.render.render(write_still=True)
+    if not render_only:
+        dataset_desc['view_centers'].append(list(cam_obj.location))
+        dataset_desc['view_rots'].append([rx, ry])
+        with open(data_desc_file, 'w') as fp:
+            json.dump(dataset_desc, fp, indent=4)
+
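+# Re-render any previously recorded views whose image file is missing.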
+for i in range(len(dataset_desc['view_centers'])):
+    if not os.path.exists('output/' + dataset_desc['view_file_pattern'] % i):
+        add_sample(i, *dataset_desc['view_centers'][i], *dataset_desc['view_rots'][i], render_only=True)
+
+start_view = len(dataset_desc['view_centers'])
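+# A single entry in 'samples' requests that many random poses drawn uniformly from
+# 'range'; otherwise 'samples' gives the number of grid steps per dimension (x, y, z, rx, ry).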
+if len(dataset_desc['samples']) == 1:
+    range_min = np.array(dataset_desc['range']['min'])
+    range_max = np.array(dataset_desc['range']['max'])
+    samples = (range_max - range_min) * np.random.rand(dataset_desc['samples'][0], 5) + range_min
+    for i in range(start_view, dataset_desc['samples'][0]):
+        add_sample(i, *list(samples[i]))
+else:
+    ranges = [
+        np.linspace(dataset_desc['range']['min'][i],
+                    dataset_desc['range']['max'][i],
+                    dataset_desc['samples'][i])
+        for i in range(5)
+    ]
+
+    i = 0
+    for x, y, z, rx, ry in product(*ranges):
+        if i >= start_view:
+            add_sample(i, x, y, z, rx, ry)
+        i += 1
diff --git a/data/blender_gen/gen_periph.py b/data/blender_gen/gen_periph.py
new file mode 100644
index 0000000..b005cb7
--- /dev/null
+++ b/data/blender_gen/gen_periph.py
@@ -0,0 +1,101 @@
+import bpy
+import math
+import json
+import os
+import numpy as np
+from itertools import product
+
+
+scene = bpy.context.scene
+cam_obj = scene.camera
+cam = cam_obj.data
+scene.cycles.device = 'GPU'
+
+dataset_name = 'train'
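+# Sampling box centered at the origin: tbox is the translation extent per axis,
+# rbox the rotation extent (rx, ry) in degrees.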
+tbox = [0.7, 0.7, 0.7]
+rbox = [300, 120]
+
+dataset_desc = {
+    'view_file_pattern': '%s/view_%%04d.png' % dataset_name,
+    "gl_coord": True,
+    'view_res': {
+        'x': 512,
+        'y': 512
+    },
+    'cam_params': {
+        'fov': 60.0,
+        'cx': 0.5,
+        'cy': 0.5,
+        'normalized': True
+    },
+    'range': {
+        'min': [-tbox[0] / 2, -tbox[1] / 2, -tbox[2] / 2, -rbox[0] / 2, -rbox[1] / 2],
+        'max': [tbox[0] / 2, tbox[1] / 2, tbox[2] / 2, rbox[0] / 2, rbox[1] / 2]
+    },
+    'samples': [5, 5, 5, 6, 3],
+    #'samples': [2000],
+    'view_centers': [],
+    'view_rots': []
+}
+data_desc_file = f'output/{dataset_name}.json'
+
+if not os.path.exists('output'):
+    os.mkdir('output')
+
+if os.path.exists(data_desc_file):
+    with open(data_desc_file, 'r') as fp:
+        dataset_desc.update(json.load(fp))
+with open(data_desc_file, 'w') as fp:
+    json.dump(dataset_desc, fp, indent=4)
+
+# Output resolution
+scene.render.resolution_x = dataset_desc['view_res']['x']
+scene.render.resolution_y = dataset_desc['view_res']['y']
+
+# Field of view
+cam.lens_unit = 'FOV'
+cam.angle = math.radians(dataset_desc['cam_params']['fov'])
+cam.dof.use_dof = False
+
+
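+# Render view i at the given pose; unless render_only, also record the pose in the
+# descriptor and rewrite the JSON so an interrupted run can be resumed.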
+def add_sample(i, x, y, z, rx, ry, render_only=False):
+    cam_obj.location = [x, y, z]
+    cam_obj.rotation_euler = [math.radians(ry), math.radians(rx), 0]
+    scene.render.filepath = 'output/' + dataset_desc['view_file_pattern'] % i
+    bpy.ops.render.render(write_still=True)
+    if not render_only:
+        dataset_desc['view_centers'].append(list(cam_obj.location))
+        dataset_desc['view_rots'].append([rx, ry])
+        with open(data_desc_file, 'w') as fp:
+            json.dump(dataset_desc, fp, indent=4)
+
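+# Re-render any previously recorded views whose image file is missing.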
+for i in range(len(dataset_desc['view_centers'])):
+    if not os.path.exists('output/' + dataset_desc['view_file_pattern'] % i):
+        add_sample(i, *dataset_desc['view_centers'][i], *dataset_desc['view_rots'][i], render_only=True)
+
+start_view = len(dataset_desc['view_centers'])
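+# A single entry in 'samples' requests that many random poses drawn uniformly from
+# 'range'; otherwise 'samples' gives the number of grid steps per dimension (x, y, z, rx, ry).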
+if len(dataset_desc['samples']) == 1:
+    range_min = np.array(dataset_desc['range']['min'])
+    range_max = np.array(dataset_desc['range']['max'])
+    samples = (range_max - range_min) * np.random.rand(dataset_desc['samples'][0], 5) + range_min
+    for i in range(start_view, dataset_desc['samples'][0]):
+        add_sample(i, *list(samples[i]))
+else:
+    ranges = [
+        np.linspace(dataset_desc['range']['min'][i],
+                    dataset_desc['range']['max'][i],
+                    dataset_desc['samples'][i])
+        for i in range(5)
+    ]
+
+    i = 0
+    for x, y, z, rx, ry in product(*ranges):
+        if i >= start_view:
+            add_sample(i, x, y, z, rx, ry)
+        i += 1
diff --git a/data/calc_range.py b/data/calc_range.py
new file mode 100644
index 0000000..477a167
--- /dev/null
+++ b/data/calc_range.py
@@ -0,0 +1,32 @@
+import json
+import os
+import argparse
+import numpy as np
+
+parser = argparse.ArgumentParser()
+parser.add_argument('dataset', type=str)
+args = parser.parse_args()
+
+
+data_desc_path = args.dataset
+data_desc_name = os.path.splitext(os.path.basename(data_desc_path))[0]
+data_dir = os.path.dirname(data_desc_path) + '/'
+
+with open(data_desc_path, 'r') as fp:
+    dataset_desc = json.load(fp)
+
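+# Set 'range' to the axis-aligned bounding box of all view centers; rotation bounds are left at zero.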
+centers = np.array(dataset_desc['view_centers'])
+t_max = np.max(centers, axis=0)
+t_min = np.min(centers, axis=0)
+dataset_desc['range'] = {
+    'min': [t_min[0], t_min[1], t_min[2], 0, 0],
+    'max': [t_max[0], t_max[1], t_max[2], 0, 0]
+}
+with open(data_desc_path, 'w') as fp:
+    json.dump(dataset_desc, fp, indent=4)
\ No newline at end of file
diff --git a/data/gen_seq.py b/data/gen_seq.py
new file mode 100644
index 0000000..7dbb7d6
--- /dev/null
+++ b/data/gen_seq.py
@@ -0,0 +1,97 @@
+import json
+import sys
+import os
+import argparse
+import numpy as np
+
+sys.path.append(os.path.abspath(sys.path[0] + '/../'))
+
+from utils import seqs
+from utils import misc
+from utils.constants import PI
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-r', '--rot-range', nargs='+', type=int)
+parser.add_argument('-t', '--trans-range', nargs='+', type=float)
+parser.add_argument('--fov', type=float)
+parser.add_argument('--res', type=str)
+parser.add_argument('--gl', action='store_true')
+parser.add_argument('-s', '--seq', type=str, required=True)
+parser.add_argument('-n', '--views', type=int, required=True)
+parser.add_argument('-o', '--out-desc', type=str)
+parser.add_argument('--ref', type=str)
+parser.add_argument('dataset', type=str)
+args = parser.parse_args()
+
+
+data_dir = args.dataset
+misc.create_dir(data_dir)
+out_desc_path = os.path.join(data_dir, (args.out_desc if args.out_desc else f"{args.seq}.json"))
+
+if args.ref:
+    with open(os.path.join(data_dir, args.ref), 'r') as fp:
+        ref_desc = json.load(fp)
+else:
+    if not args.trans_range or not args.rot_range or not args.fov or not args.res:
+        print('-r, -t, --fov, --res options are required if --ref is not specified')
+        exit(-1)
+    ref_desc = None
+
+if args.trans_range:
+    trans_range = np.array(list(args.trans_range) * 3 if len(args.trans_range) == 1
+                           else args.trans_range)
+else:
+    trans_range = np.array(ref_desc['range']['max'][0:3]) - \
+        np.array(ref_desc['range']['min'][0:3])
+if args.rot_range:
+    rot_range = np.array(list(args.rot_range) * 2 if len(args.rot_range) == 1
+                         else args.rot_range)
+else:
+    rot_range = np.array(ref_desc['range']['max'][3:5]) - \
+        np.array(ref_desc['range']['min'][3:5])
+filter_range = np.concatenate([trans_range, rot_range])
+
+if args.fov:
+    cam_params = {
+        'fov': args.fov,
+        'cx': 0.5,
+        'cy': 0.5,
+        'normalized': True
+    }
+else:
+    cam_params = ref_desc['cam_params']
+
+if args.res:
+    res = tuple(int(s) for s in args.res.split('x'))
+    res = {'x': res[0], 'y': res[1]}
+else:
+    res = ref_desc['view_res']
+
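+# Generate the camera path for the requested sequence type (see utils.seqs).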
+if args.seq == 'helix':
+    centers, rots = seqs.helix(trans_range, 4, args.views)
+elif args.seq == 'scan_around':
+    centers, rots = seqs.scan_around(trans_range, 1, args.views)
+elif args.seq == 'look_around':
+    centers, rots = seqs.look_around(trans_range, args.views)
+else:
+    print(f'Unknown sequence type: {args.seq}')
+    exit(-1)
+
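+# seqs returns rotations in radians; convert to degrees and, for OpenGL-style
+# coordinates, flip the z translation and the first rotation component.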
+rots *= 180 / PI
+gl = args.gl or (ref_desc.get('gl_coord', False) if ref_desc else False)
+if gl:
+    centers[:, 2] *= -1
+    rots[:, 0] *= -1
+
+dataset_desc = {
+    'gl_coord': gl,
+    'view_res': res,
+    'cam_params': cam_params,
+    'range': {
+        'min': (-0.5 * filter_range).tolist(),
+        'max': (0.5 * filter_range).tolist()
+    },
+    'samples': [args.views],
+    'view_centers': centers.tolist(),
+    'view_rots': rots.tolist()
+}
+
+with open(out_desc_path, 'w') as fp:
+    json.dump(dataset_desc, fp, indent=4)
diff --git a/data/gen_subset.py b/data/gen_subset.py
new file mode 100644
index 0000000..30d23f8
--- /dev/null
+++ b/data/gen_subset.py
@@ -0,0 +1,100 @@
+import json
+import sys
+import os
+import argparse
+import numpy as np
+
+sys.path.append(os.path.abspath(sys.path[0] + '/../'))
+
+from utils import misc
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-r', '--rot-range', nargs='+', type=int)
+parser.add_argument('-t', '--trans-range', nargs='+', type=float)
+parser.add_argument('-k', '--trainset-ratio', type=float, default=0.7)
+parser.add_argument('dataset', type=str)
+args = parser.parse_args()
+
+
+data_desc_path = args.dataset
+data_desc_name = os.path.splitext(os.path.basename(data_desc_path))[0]
+data_dir = os.path.dirname(data_desc_path) + '/'
+
+with open(data_desc_path, 'r') as fp:
+    dataset_desc = json.load(fp)
+
+if args.trans_range:
+    trans_range = np.array(args.trans_range)
+else:
+    trans_range = np.array(dataset_desc['range']['max'][0:3]) - \
+        np.array(dataset_desc['range']['min'][0:3])
+if args.rot_range:
+    rot_range = np.array(args.rot_range)
+else:
+    rot_range = np.array(dataset_desc['range']['max'][3:5]) - \
+        np.array(dataset_desc['range']['min'][3:5])
+filter_range = np.concatenate([trans_range, rot_range])
+
+out_data_dir = data_dir + 'r%dx%d_t%.1fx%.1fx%.1f/' % (
+    int(rot_range[0]), int(rot_range[1]),
+    trans_range[0], trans_range[1], trans_range[2]
+)
+
+dataset_version = 0
+while True:
+    out_trainset_name = f'train_{dataset_version}'
+    out_testset_name = f'test_{dataset_version}'
+    if not os.path.exists(out_data_dir + out_trainset_name):
+        break
+    dataset_version += 1
+
+
+def in_range(val, extent): return -extent / 2 <= val <= extent / 2
+
+
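+# Keep only the views whose rotation and translation fall inside the symmetric filter range.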
+views = []
+for i in range(len(dataset_desc['view_centers'])):
+    if in_range(dataset_desc['view_rots'][i][0], rot_range[0]) and \
+            in_range(dataset_desc['view_rots'][i][1], rot_range[1]) and \
+            in_range(dataset_desc['view_centers'][i][0], trans_range[0]) and \
+            in_range(dataset_desc['view_centers'][i][1], trans_range[1]) and \
+            in_range(dataset_desc['view_centers'][i][2], trans_range[2]):
+        views.append(i)
+
+if len(views) < 100:
+    print(f'Number of views in range is too small ({len(views)})')
+    exit(-1)
+
+views = np.random.permutation(views)
+n_train_views = int(len(views) * args.trainset_ratio)
+train_views = np.sort(views[:n_train_views])
+test_views = np.sort(views[n_train_views:])
+
+print('Train set views: ', len(train_views))
+print('Test set views: ', len(test_views))
+
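+# Write a descriptor for the given view subset and symlink its images from the original dataset.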
+def create_subset(views, out_desc_name):
+    views = views.tolist()
+    subset_desc = dataset_desc.copy()
+    subset_desc['view_file_pattern'] = \
+        f"{out_desc_name}/{dataset_desc['view_file_pattern'].split('/')[-1]}"
+    subset_desc['range'] = {
+        'min': list(-filter_range / 2),
+        'max': list(filter_range / 2)
+    }
+    subset_desc['samples'] = [int(len(views))]
+    subset_desc['views'] = views
+    subset_desc['view_centers'] = np.array(dataset_desc['view_centers'])[views].tolist()
+    subset_desc['view_rots'] = np.array(dataset_desc['view_rots'])[views].tolist()
+
+    with open(os.path.join(out_data_dir, f'{out_desc_name}.json'), 'w') as fp:
+        json.dump(subset_desc, fp, indent=4)
+    misc.create_dir(os.path.join(out_data_dir, out_desc_name))
+    for i in range(len(views)):
+        os.symlink(os.path.join('../../', dataset_desc['view_file_pattern'] % views[i]),
+                   os.path.join(out_data_dir, subset_desc['view_file_pattern'] % views[i]))
+
+
+misc.create_dir(out_data_dir)
+create_subset(train_views, out_trainset_name)
+create_subset(test_views, out_testset_name)
diff --git a/data/split_dataset.py b/data/split_dataset.py
new file mode 100644
index 0000000..09a614b
--- /dev/null
+++ b/data/split_dataset.py
@@ -0,0 +1,86 @@
+import json
+import sys
+import os
+import argparse
+import numpy as np
+import torch
+from itertools import product
+
+sys.path.append(os.path.abspath(sys.path[0] + '/../'))
+
+from utils import misc
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-o', '--output', type=str, default='train1')
+parser.add_argument('dataset', type=str)
+args = parser.parse_args()
+
+
+data_desc_path = args.dataset
+data_desc_name = os.path.splitext(os.path.basename(data_desc_path))[0]
+data_dir = os.path.dirname(data_desc_path) + '/'
+
+with open(data_desc_path, 'r') as fp:
+    dataset_desc = json.load(fp)
+
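+# Arrange the view indices into the original sampling grid so subsets can be sliced per dimension.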
+indices = torch.arange(len(dataset_desc['view_centers'])).view(dataset_desc['samples'])
+
+idx = 0
+'''
+for i in range(3):
+    for j in range(2):
+        out_desc_name = f'part{idx:d}'
+        out_desc = dataset_desc.copy()
+        out_desc['view_file_pattern'] = f'{out_desc_name}/view_%04d.png'
+        n_x = out_desc['samples'][3] // 3
+        n_y = out_desc['samples'][4] // 2
+        views = indices[..., i * n_x:(i + 1) * n_x, j * n_y:(j + 1) * n_y].flatten().tolist()
+        out_desc['samples'] = [len(views)]
+        out_desc['views'] = views
+        out_desc['view_centers'] = np.array(dataset_desc['view_centers'])[views].tolist()
+        out_desc['view_rots'] = np.array(dataset_desc['view_rots'])[views].tolist()
+        with open(os.path.join(data_dir, f'{out_desc_name}.json'), 'w') as fp:
+            json.dump(out_desc, fp, indent=4)
+        misc.create_dir(os.path.join(data_dir, out_desc_name))
+        for k in range(len(views)):
+            os.symlink(os.path.join('..', dataset_desc['view_file_pattern'] % views[k]),
+                    os.path.join(data_dir, out_desc['view_file_pattern'] % views[k]))
+        idx += 1
+'''
+
+'''
+for xi in range(0, 4, 2):
+    for yi in range(0, 4, 2):
+        for zi in range(0, 4, 2):
+            out_desc_name = f'part{idx:d}'
+            out_desc = dataset_desc.copy()
+            out_desc['view_file_pattern'] = f'{out_desc_name}/view_%04d.png'
+            views = indices[xi:xi + 2, yi:yi + 2, zi:zi + 2].flatten().tolist()
+            out_desc['samples'] = [len(views)]
+            out_desc['views'] = views
+            out_desc['view_centers'] = np.array(dataset_desc['view_centers'])[views].tolist()
+            out_desc['view_rots'] = np.array(dataset_desc['view_rots'])[views].tolist()
+            with open(os.path.join(data_dir, f'{out_desc_name}.json'), 'w') as fp:
+                json.dump(out_desc, fp, indent=4)
+            misc.create_dir(os.path.join(data_dir, out_desc_name))
+            for k in range(len(views)):
+                os.symlink(os.path.join('..', dataset_desc['view_file_pattern'] % views[k]),
+                           os.path.join(data_dir, out_desc['view_file_pattern'] % views[k]))
+            idx += 1
+'''
+
+out_desc_name = args.output
+out_desc = dataset_desc.copy()
+out_desc['view_file_pattern'] = f"{out_desc_name}/{dataset_desc['view_file_pattern'].split('/')[-1]}"
+views = []
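+# Keep grid indices 0/2/4 along x, y and z, rx indices 0-8, and only ry index 1.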
+for idx in product([0, 2, 4], [0, 2, 4], [0, 2, 4], list(range(9)), [1]):#, [0, 2, 3, 5], [1, 2, 3, 4]):
+    views += indices[idx].flatten().tolist()
+out_desc['samples'] = [len(views)]
+out_desc['views'] = views
+out_desc['view_centers'] = np.array(dataset_desc['view_centers'])[views].tolist()
+out_desc['view_rots'] = np.array(dataset_desc['view_rots'])[views].tolist()
+with open(os.path.join(data_dir, f'{out_desc_name}.json'), 'w') as fp:
+    json.dump(out_desc, fp, indent=4)
+misc.create_dir(os.path.join(data_dir, out_desc_name))
+for k in range(len(views)):
+    os.symlink(os.path.join('..', dataset_desc['view_file_pattern'] % views[k]),
+               os.path.join(data_dir, out_desc['view_file_pattern'] % views[k]))
-- 
GitLab