add change loss layer

Wlad 2021-02-17 16:01:18 +01:00
parent 43f7463ac1
commit 91bc04a36c
5 changed files with 40 additions and 15 deletions

View File

@@ -1,5 +1,5 @@
 output:
-  rootDir: "./tests/09-02-21"
+  rootDir: "./tests/17-02-21"
   prefix: ""
 smpl:
   modelRootDir: ./models
@@ -45,15 +45,15 @@ pose:
   maxCollisions: 8
   sigma: 0.5
   changeLoss:
-    enabled: false
-    weight: 0.2
+    enabled: true
+    weight: 2.0
   confWeights:
     enabled: false
   vposerPath: "./vposer_v1_0"
   temporal:
-    enabled: false
-    iterations: 5
-    lr: 0.1
+    enabled: true
+    iterations: 50
+    lr: 0.01
   preview:
     enable: true,
   keypoins:
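The changeLoss block is switched on with a tenfold weight increase (0.2 to 2.0), and the temporal pass now runs more iterations (50) at a lower learning rate (0.01). The loss implementation itself is not part of this diff; the sketch below only illustrates what a pose change loss weighted this way could look like, assuming it penalizes the squared difference between the current pose parameters and those of the previously optimized frame (the function name, the 69-parameter pose vector, and the direct use of torch are assumptions, not code from the repository).

# Hypothetical sketch -- the actual change loss is defined elsewhere in the repo.
import torch

def change_loss(current_pose: torch.Tensor,
                previous_pose: torch.Tensor,
                weight: float = 2.0) -> torch.Tensor:
    # penalize how much the pose changed since the previous frame,
    # scaled by the configured changeLoss weight
    return weight * torch.mean((current_pose - previous_pose) ** 2)

# toy usage: previous_pose would come from the last optimized frame
prev = torch.zeros(1, 69)                      # assumed SMPL body pose size
curr = torch.zeros(1, 69, requires_grad=True)
loss = change_loss(curr, prev, weight=2.0)
loss.backward()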

View File

@@ -8,8 +8,8 @@ from utils.general import *
 from renderer import *
 from utils.general import rename_files, get_new_filename
-START_IDX = 60  # starting index of the frame to optimize for
-FINISH_IDX = 70  # choose a big number to optimize for all frames in samples directory
+START_IDX = 1  # starting index of the frame to optimize for
+FINISH_IDX = 100  # choose a big number to optimize for all frames in samples directory
 # if False, only run already saved animation without optimization
 RUN_OPTIMIZATION = True
@@ -41,7 +41,7 @@ if RUN_OPTIMIZATION:
         verbose=False,
         offscreen=True,
         save_to_file=True,
-        interpolate=True
+        interpolate=False
     )
 # TODO: use current body pose and camera transform for next optimization?
@@ -89,5 +89,5 @@ else:
     video_name = getfilename_from_conf(
         config) + "-" + str(START_IDX) + "-" + str(FINISH_IDX)
-    #video_from_pkl(anim_file, video_name, config)
-    replay_animation(anim_file, interpolated=True)
+    video_from_pkl(anim_file, video_name, config)
+    # replay_animation(anim_file, interpolated=True)

View File

@@ -9,6 +9,14 @@ import smplx
 import re
+def is_loss_enabled(config, name):
+    return config['pose'][name]['enabled']
+
+
+def toggle_loss_enabled(config, name, value):
+    config['pose'][name]['enabled'] = value
+
+
 def get_loss_conf(config, name):
     return config['pose'][name]
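The two new helpers simply read and write the enabled flag of a named loss section under pose in the config dict. A minimal usage example (the inline config dict is a stand-in, not the project's full configuration):

from modules.utils import is_loss_enabled, toggle_loss_enabled

config = {'pose': {'changeLoss': {'enabled': True, 'weight': 2.0}}}

print(is_loss_enabled(config, 'changeLoss'))   # True
toggle_loss_enabled(config, 'changeLoss', False)
print(is_loss_enabled(config, 'changeLoss'))   # False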

View File

@@ -1,4 +1,5 @@
 # library imports
+from modules.utils import is_loss_enabled, toggle_loss_enabled
 import os
 import pickle
 import torch
@@ -39,6 +40,13 @@ def optimize_sample(sample_index, dataset, config, device=torch.device('cpu'), d
     # render camera to the scene
     camera.setup_visualization(r.init_keypoints, r.keypoints)
+    # the change loss needs at least one completed pass,
+    # so we disable it (if enabled at all) for the first pass
+    change_loss_disabled = False
+    if is_loss_enabled(config, 'changeLoss') and initial_pose is None:
+        change_loss_disabled = True
+        toggle_loss_enabled(config, 'changeLoss', False)
+
     # train for pose
     best_out, cam_trans, loss_history, step_imgs, loss_components = train_pose_with_conf(
         config=config,
@@ -52,6 +60,10 @@ def optimize_sample(sample_index, dataset, config, device=torch.device('cpu'), d
         render_steps=(offscreen or interactive)
     )
+
+    # make sure the change loss is enabled again for the next sample
+    if change_loss_disabled:
+        toggle_loss_enabled(config, 'changeLoss', True)
     # if display_result and interactive:
     #     r.wait_for_close()
@@ -73,7 +85,7 @@ def create_animation(dataset, config, start_idx=0, end_idx=None, offscreen=False
     config['pose']['lr'] = config['pose']['temporal']['lr']
     config['pose']['iterations'] = config['pose']['temporal']['iterations']
-    best_out, cam_trans, train_loss, step_imgs = optimize_sample(
+    best_out, cam_trans, train_loss, step_imgs, loss_components = optimize_sample(
         idx,
         dataset,
         config,
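The guard above disables changeLoss on the first pass (when there is no initial_pose yet) and re-enables it after training so the next sample sees the configured value. A possible variant, shown only as a sketch and not part of this commit, wraps the same bracket in try/finally so the flag is also restored if the training call raises; run_with_change_loss_guard and train_fn are illustrative names only:

from modules.utils import is_loss_enabled, toggle_loss_enabled

def run_with_change_loss_guard(config, initial_pose, train_fn):
    # disable changeLoss for the very first pass only
    disabled = is_loss_enabled(config, 'changeLoss') and initial_pose is None
    if disabled:
        toggle_loss_enabled(config, 'changeLoss', False)
    try:
        return train_fn(config)   # e.g. a closure around train_pose_with_conf
    finally:
        if disabled:
            toggle_loss_enabled(config, 'changeLoss', True)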

View File

@@ -6,6 +6,7 @@ from tqdm import tqdm
 import numpy as np
 from scipy import interpolate
+
 def make_video(images, video_name: str, fps=5, ext: str = "mp4"):
     images = np.array(images)
     width = images.shape[2]
@@ -53,6 +54,7 @@ def save_to_video(poses, video_name, config, fps=30, interpolated=False):
     make_video(frames, video_name, fps)
+
 def interpolate_poses(poses, num_intermediate=5):
     """
     Interpolate vertices and cameras between pairs of frames by adding intermediate results
@@ -79,14 +81,17 @@ def interpolate_poses(poses, num_intermediate=5):
         f1 = interpolate.interp1d(x, poses_pair, axis=0)
         f2 = interpolate.interp1d(x, camera_pair, axis=0)
-        evenly_spaced_points = np.linspace(x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)
+        evenly_spaced_points = np.linspace(
+            x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)
         new_frames = f1(evenly_spaced_points)
         new_cameras = f2(evenly_spaced_points)
-        arr = [(new_frames[i], new_cameras[i]) for i in range(new_frames.shape[0])]
+        arr = [(new_frames[i], new_cameras[i])
+               for i in range(new_frames.shape[0])]
         if 0 < i < len(poses) - 1:
-            arr.pop(0)  # remove first frame that was already added in the last interpolation
+            # remove first frame that was already added in the last interpolation
+            arr.pop(0)
         new_poses += arr
     return new_poses
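The reflowed lines keep the behaviour unchanged: for each consecutive pair of frames, interp1d builds a linear interpolant over vertices and camera, and linspace samples (pairs - 1) * (num_intermediate + 1) + 1 points, so a pair expands to num_intermediate + 2 frames (the two originals plus the intermediates). A standalone toy run of that arithmetic (the array shapes are made up for illustration, not the project's actual data):

import numpy as np
from scipy import interpolate

num_intermediate = 5
# two consecutive "frames" of three 3D vertices each (toy data)
poses_pair = np.array([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]],
                       [[1., 1., 1.], [2., 1., 1.], [1., 2., 1.]]])
x = np.arange(poses_pair.shape[0])          # [0, 1]

f1 = interpolate.interp1d(x, poses_pair, axis=0)
evenly_spaced_points = np.linspace(
    x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)
new_frames = f1(evenly_spaced_points)
print(new_frames.shape)                     # (7, 3, 3): 2 originals + 5 intermediates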