add change loss layer

This commit is contained in:
Wlad 2021-02-17 16:01:18 +01:00
parent 43f7463ac1
commit 91bc04a36c
5 changed files with 40 additions and 15 deletions

View File

@ -1,5 +1,5 @@
output:
rootDir: "./tests/09-02-21"
rootDir: "./tests/17-02-21"
prefix: ""
smpl:
modelRootDir: ./models
@ -45,15 +45,15 @@ pose:
maxCollisions: 8
sigma: 0.5
changeLoss:
enabled: false
weight: 0.2
enabled: true
weight: 2.0
confWeights:
enabled: false
vposerPath: "./vposer_v1_0"
temporal:
enabled: false
iterations: 5
lr: 0.1
enabled: true
iterations: 50
lr: 0.01
preview:
enable: true,
keypoins:

View File

@ -8,8 +8,8 @@ from utils.general import *
from renderer import *
from utils.general import rename_files, get_new_filename
START_IDX = 60 # starting index of the frame to optimize for
FINISH_IDX = 70 # choose a big number to optimize for all frames in samples directory
START_IDX = 1 # starting index of the frame to optimize for
FINISH_IDX = 100 # choose a big number to optimize for all frames in samples directory
# if False, only run already saved animation without optimization
RUN_OPTIMIZATION = True
@ -41,7 +41,7 @@ if RUN_OPTIMIZATION:
verbose=False,
offscreen=True,
save_to_file=True,
interpolate=True
interpolate=False
)
# TODO: use current body pose and camera transform for next optimization?
@ -89,5 +89,5 @@ else:
video_name = getfilename_from_conf(
config) + "-" + str(START_IDX) + "-" + str(FINISH_IDX)
#video_from_pkl(anim_file, video_name, config)
replay_animation(anim_file, interpolated=True)
video_from_pkl(anim_file, video_name, config)
# replay_animation(anim_file, interpolated=True)

View File

@ -9,6 +9,14 @@ import smplx
import re
def is_loss_enabled(config, name):
    """Return True when the loss layer *name* is switched on in *config*.

    Looks up ``config['pose'][name]['enabled']``; raises KeyError if the
    loss section is missing from the configuration.
    """
    loss_section = config['pose'][name]
    return loss_section['enabled']
def toggle_loss_enabled(config, name, value):
    """Set the ``enabled`` flag of the loss layer *name* to *value*.

    Mutates *config* in place; returns None.
    """
    loss_section = config['pose'][name]
    loss_section['enabled'] = value
def get_loss_conf(config, name):
    """Return the configuration sub-dict for the loss layer *name*.

    The returned mapping is the live object stored under
    ``config['pose'][name]`` — mutating it mutates *config*.
    """
    pose_section = config['pose']
    return pose_section[name]

View File

@ -1,4 +1,5 @@
# library imports
from modules.utils import is_loss_enabled, toggle_loss_enabled
import os
import pickle
import torch
@ -39,6 +40,13 @@ def optimize_sample(sample_index, dataset, config, device=torch.device('cpu'), d
# render camera to the scene
camera.setup_visualization(r.init_keypoints, r.keypoints)
# the change loss requires at least one completed pass before it can be computed,
# so we disable this loss (if it is enabled at all) for the first pass
change_loss_disabled = False
if is_loss_enabled(config, 'changeLoss') and initial_pose is None:
change_loss_disabled = True
toggle_loss_enabled(config, 'changeLoss', False)
# train for pose
best_out, cam_trans, loss_history, step_imgs, loss_components = train_pose_with_conf(
config=config,
@ -52,6 +60,10 @@ def optimize_sample(sample_index, dataset, config, device=torch.device('cpu'), d
render_steps=(offscreen or interactive)
)
# make sure change loss is enabled for the next sample
if change_loss_disabled:
toggle_loss_enabled(config, 'changeLoss', True)
# if display_result and interactive:
# r.wait_for_close()
@ -73,7 +85,7 @@ def create_animation(dataset, config, start_idx=0, end_idx=None, offscreen=False
config['pose']['lr'] = config['pose']['temporal']['lr']
config['pose']['iterations'] = config['pose']['temporal']['iterations']
best_out, cam_trans, train_loss, step_imgs = optimize_sample(
best_out, cam_trans, train_loss, step_imgs, loss_components = optimize_sample(
idx,
dataset,
config,

View File

@ -6,6 +6,7 @@ from tqdm import tqdm
import numpy as np
from scipy import interpolate
def make_video(images, video_name: str, fps=5, ext: str = "mp4"):
images = np.array(images)
width = images.shape[2]
@ -53,6 +54,7 @@ def save_to_video(poses, video_name, config, fps=30, interpolated=False):
make_video(frames, video_name, fps)
def interpolate_poses(poses, num_intermediate=5):
"""
Interpolate vertices and cameras between pairs of frames by adding intermediate results
@ -79,14 +81,17 @@ def interpolate_poses(poses, num_intermediate=5):
f1 = interpolate.interp1d(x, poses_pair, axis=0)
f2 = interpolate.interp1d(x, camera_pair, axis=0)
evenly_spaced_points = np.linspace(x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)
evenly_spaced_points = np.linspace(
x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)
new_frames = f1(evenly_spaced_points)
new_cameras = f2(evenly_spaced_points)
arr = [(new_frames[i], new_cameras[i]) for i in range(new_frames.shape[0])]
arr = [(new_frames[i], new_cameras[i])
for i in range(new_frames.shape[0])]
if 0 < i < len(poses) - 1:
arr.pop(0) # remove first frame that was already added in the last interpolation
# remove first frame that was already added in the last interpolation
arr.pop(0)
new_poses += arr
return new_poses