# Configure environment

## 1. Install Conda packages:

* Pytorch with CUDA

  ```bash
  $ conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
  ```

  Or refer to https://pytorch.org/get-started/locally/ for the install guide

* matplotlib
* tensorboard
* tqdm
* configargparse
* (Optional) dash

  ```bash
  $ conda install dash pandas
  ```

## 2. Install Pip packages:

* pyGlm
* tensorboardX
* torch_tb_profiler
* opencv-python
* ipympl
* lpips
* (optional) thop
* (optional) ConcurrentLogHandler

## 3. Build extension "clib._ext"

```bash
$ python setup.py build_ext
```

If the build succeeded, an _ext.\*.so file will be generated under the build/lib.\*/clib directory. Move this file to clib/.

## 4. (Optional) Install FFmpeg with Extra Codecs:

```bash
sudo apt-get update -qq && sudo apt-get -y install \
  autoconf \
  automake \
  build-essential \
  cmake \
  git-core \
  libass-dev \
  libfreetype6-dev \
  libgnutls28-dev \
  libsdl2-dev \
  libtool \
  libva-dev \
  libvdpau-dev \
  libvorbis-dev \
  libxcb1-dev \
  libxcb-shm0-dev \
  libxcb-xfixes0-dev \
  meson \
  ninja-build \
  pkg-config \
  texinfo \
  wget \
  yasm \
  zlib1g-dev \
  libunistring-dev \
  libvpx-dev \
  libfdk-aac-dev \
  libmp3lame-dev \
  libopus-dev \
  nasm \
  libx264-dev \
  libx265-dev \
  libnuma-dev

mkdir -p ~/ffmpeg_sources ~/bin

cd ~/ffmpeg_sources && \
wget -O ffmpeg-snapshot.tar.bz2 https://ffmpeg.org/releases/ffmpeg-snapshot.tar.bz2 && \
tar xjvf ffmpeg-snapshot.tar.bz2

cd ffmpeg && \
PATH="$HOME/bin:$PATH" PKG_CONFIG_PATH="$HOME/ffmpeg_build/lib/pkgconfig" ./configure \
  --prefix="$HOME/ffmpeg_build" \
  --pkg-config-flags="--static" \
  --extra-cflags="-I$HOME/ffmpeg_build/include" \
  --extra-ldflags="-L$HOME/ffmpeg_build/lib" \
  --extra-libs="-lpthread -lm" \
  --bindir="$HOME/bin" \
  --enable-gpl \
  --enable-gnutls \
  --enable-libass \
  --enable-libfdk-aac \
  --enable-libfreetype \
  --enable-libmp3lame \
  --enable-libopus \
  --enable-libvorbis \
  --enable-libvpx \
  --enable-libx264 \
  --enable-libx265 \
  --enable-nonfree && \
PATH="$HOME/bin:$PATH" make && \
make install && \
hash -r
```

# Useful commands

## 1. Video generate:

```bash
$ ffmpeg -y -r 50 -i %04d.png -c:v libx264 -vframes 600 ../classroom_hmd_mono_hint.mp4
```

## 2. Extract frames:

```bash
$ ffmpeg -i <input_video> -f image2 -q:v 2 -vf fps=<fps> <out_dir>/image%04d.png
```

## 3. Convert onnx to tensorRT

```bash
$ trtexec --onnx=in.onnx --fp16 --saveEngine=out.trt --workspace=4096
```

## 4. Generate dataset of specific path

```bash
$ python tools/data/gen_seq.py -s helix|look_around|scan_around -n <n_views> --ref <ref_dataset>
```