Mirror of https://github.com/gosticks/body-pose-animation.git (synced 2025-10-16 11:45:42 +00:00)
interpolation of frames init

commit a8a7d19c26, parent b903924503
@@ -1,10 +1,7 @@
-import pickle
 import time
 from train import create_animation
-from tqdm import tqdm
-from utils.video import make_video, video_from_pkl
-import torch
 
+from utils.video import video_from_pkl
 from dataset import SMPLyDataset
 from model import *
 from utils.general import *
@@ -27,20 +24,6 @@ dataset = SMPLyDataset.from_config(config)
 model = SMPLyModel.model_from_conf(config)
 
 
-def get_next_frame(idx):
-    """
-    Get keypoints and image_path of the frame given index.
-
-    :param idx: index of the frame
-    :return: tuple of keypoints, conf and image path
-    """
-    keypoints, keypoints_conf = dataset[idx]
-    if keypoints is None:
-        return
-    image_path = dataset.get_image_path(idx)
-    return keypoints, keypoints_conf, image_path
-
-
 # Rename files in samples directory to uniform format
 if config['data']['renameFiles']:
     rename_files(config['data']['rootDir'] + "/")
@@ -57,13 +40,14 @@ if RUN_OPTIMIZATION:
         FINISH_IDX,
         verbose=False,
         offscreen=True,
-        save_to_file=True
+        save_to_file=True,
+        interpolate=True
     )
 
 # TODO: use current body pose and camera transform for next optimization?
 
 
-def replay_animation(file, start_frame=0, end_frame=None, with_background=False, fps=30):
+def replay_animation(file, start_frame=0, end_frame=None, with_background=False, fps=30, interpolated=False):
     r = Renderer()
     r.start()
 
@@ -88,7 +72,7 @@ def replay_animation(file, start_frame=0, end_frame=None, with_background=False,
         # r.render_image_from_path(img_path, name="image", scale=est_scale)
 
         r.render_model_with_tfs(model_anim, body_pose, keep_pose=True,
-                                render_joints=False, transforms=camera_transform)
+                                render_joints=False, transforms=camera_transform, interpolated=interpolated)
         time.sleep(1 / fps)
 
 
@@ -105,5 +89,5 @@ else:
     video_name = getfilename_from_conf(
         config) + "-" + str(START_IDX) + "-" + str(FINISH_IDX)
 
-    video_from_pkl(anim_file, video_name, config)
-    replay_animation(anim_file)
+    #video_from_pkl(anim_file, video_name, config)
+    replay_animation(anim_file, interpolated=True)
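Taken together, the changes to this entry script wire interpolation through end to end. A rough sketch of the resulting control flow (a hedged reconstruction, not verbatim code: the diff does not show how anim_file is produced in the optimization branch, and RUN_OPTIMIZATION, START_IDX, and FINISH_IDX are constants defined outside these hunks):

# Hypothetical condensed flow of the script after this commit.
if RUN_OPTIMIZATION:
    # Optimize frame by frame, insert interpolated frames, save to a .pkl;
    # assumes the resulting animation file path ends up in anim_file.
    create_animation(
        dataset,
        config,
        START_IDX,
        FINISH_IDX,
        verbose=False,
        offscreen=True,
        save_to_file=True,
        interpolate=True,
    )

# Replay with interpolated=True so the renderer treats each frame as a raw
# vertex array rather than an SMPL output object.
replay_animation(anim_file, interpolated=True)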
renderer.py (13 lines changed)
@@ -193,14 +193,15 @@ class Renderer:
     def render_model_with_tfs(
         self,
         model: SMPLLayer,
-        model_out: SMPL,
+        model_out,
         color=[1.0, 0.3, 0.3, 0.8],
         replace=True,
         keep_pose=True,
         render_joints=True,
-        transforms=None
+        transforms=None,
+        interpolated=False
     ):
-        if model_out is None:
+        if model_out is None and not interpolated:
             model_out = model()
 
         if keep_pose:
@@ -208,14 +209,14 @@ class Renderer:
         # if node is not None:
         #original_pose = node.pose
 
-        self.render_joints(model_out.joints.detach(
-        ).cpu().numpy().squeeze(), transforms=transforms)
+        if not interpolated:
+            self.render_joints(model_out.joints.detach().cpu().numpy().squeeze(), transforms=transforms)
 
         self.remove_from_group("body", "body_mesh")
 
         self.acquire()
         node = render_model_with_tfs(self.scene, model, model_out,
-                                     color, "body_mesh", replace=replace, transforms=transforms)
+                                     color, "body_mesh", replace=replace, transforms=transforms, interpolated=interpolated)
         self.release()
 
         self.add_to_group("body", node)
train.py (9 lines changed)
@@ -2,13 +2,13 @@
 import os
 import pickle
 import torch
-from utils.video import make_video
 from tqdm.auto import trange
 
 # local imports
 from train_pose import train_pose_with_conf
 from model import SMPLyModel
-from utils.general import get_new_filename, getfilename_from_conf, setup_training
+from utils.general import getfilename_from_conf, setup_training
+from utils.video import interpolate_poses
 from camera_estimation import TorchCameraEstimate
 
 
@@ -58,7 +58,7 @@ def optimize_sample(sample_index, dataset, config, device=torch.device('cpu'), d
     return best_out, cam_trans, loss_history, step_imgs
 
 
-def create_animation(dataset, config, start_idx=0, end_idx=None, device=torch.device('cpu'), dtype=torch.float32, offscreen=False, verbose=False, save_to_file=False):
+def create_animation(dataset, config, start_idx=0, end_idx=None, offscreen=False, verbose=False, save_to_file=False, interpolate=False):
     model_outs = []
     use_temporal_data = config['pose']['temporal']['enabled']
     if end_idx is None:
@@ -100,6 +100,9 @@ def create_animation(dataset, config, start_idx=0, end_idx=None, device=torch.de
         if use_temporal_data:
             initial_pose = best_out.body_pose.detach().clone().cpu()  # .to(device=device)
 
+    if interpolate:
+        model_outs = interpolate_poses(model_outs)
+
     file_path = None
 
     if save_to_file:
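Inside create_animation, model_outs collects one (model_output, camera_transform) pair per optimized frame, and the new hook expands that list in place before saving. A condensed sketch of where the hook sits (a hypothetical simplification; the real loop also handles temporal initialization, rendering, and file output):

# Hypothetical condensed create_animation; optimize_sample's actual return
# signature (best_out, cam_trans, loss_history, step_imgs) is shown above.
from train import optimize_sample
from utils.video import interpolate_poses

def create_animation_sketch(dataset, config, start_idx, end_idx, interpolate=False):
    model_outs = []
    for idx in range(start_idx, end_idx):
        best_out, cam_trans, _, _ = optimize_sample(idx, dataset, config)
        model_outs.append((best_out, cam_trans))

    if interpolate:
        # expand each adjacent pair of frames with intermediate results
        model_outs = interpolate_poses(model_outs)
    return model_outs

The next hunk is from the utility module that provides the free render_model_with_tfs function called by Renderer above; it teaches the mesh path about interpolated frames.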
@@ -40,9 +40,15 @@ def render_model_with_tfs(
     name=None,
     replace=False,
     pose=None,
-    transforms=None
+    transforms=None,
+    interpolated=False
 ):
-    vertices = model_out.vertices.detach().cpu().numpy().squeeze()
+
+    if not interpolated:
+        vertices = model_out.vertices.detach().cpu().numpy().squeeze()
+    else:
+        # Interpolated frames are passed as a direct array, instead of SMPLXOutput
+        vertices = model_out
 
     # set vertex colors, maybe use this to highlight accuracies
     vertex_colors = np.ones([vertices.shape[0], 4]) * color
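The interpolated flag effectively makes model_out a union: either an SMPL-X style output carrying a (1, N, 3) .vertices tensor, or a bare (N, 3) vertex array produced by interpolation. A minimal self-contained illustration of that convention (toy stand-in class, not the project's real SMPL type):

import numpy as np
import torch

class FakeSmplOutput:
    # toy stand-in for an SMPL-X output; only .vertices matters here
    def __init__(self, vertices):
        self.vertices = torch.as_tensor(vertices)

def extract_vertices(model_out, interpolated=False):
    if not interpolated:
        # SMPL path: squeeze the (1, N, 3) tensor down to (N, 3)
        return model_out.vertices.detach().cpu().numpy().squeeze()
    # interpolated path: already a plain (N, 3) array
    return model_out

verts = np.zeros((1, 10475, 3), dtype=np.float32)  # SMPL-X vertex count
assert extract_vertices(FakeSmplOutput(verts)).shape == (10475, 3)
assert extract_vertices(verts.squeeze(0), interpolated=True).shape == (10475, 3)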
utils/video.py

@@ -4,7 +4,7 @@ from renderer import DefaultRenderer
 import cv2
 from tqdm import tqdm
 import numpy as np
-
+from scipy import interpolate
 
 def make_video(images, video_name: str, fps=5, ext: str = "mp4"):
     images = np.array(images)
@@ -36,7 +36,7 @@ def video_from_pkl(filename, video_name, config, ext: str = "mp4"):
     save_to_video(model_outs, video_name, config)
 
 
-def save_to_video(poses, video_name, config, fps=30):
+def save_to_video(poses, video_name, config, fps=30, interpolated=False):
     r = DefaultRenderer(
         offscreen=True
     )
@@ -48,7 +48,50 @@ def save_to_video(poses, video_name, config, fps=30):
 
     for body_pose, cam_trans in tqdm(poses):
         r.render_model_with_tfs(model_anim, body_pose, keep_pose=True,
-                                render_joints=False, transforms=cam_trans)
+                                render_joints=False, transforms=cam_trans, interpolated=interpolated)
         frames.append(r.get_snapshot())
 
     make_video(frames, video_name, fps)
+
+
+def interpolate_poses(poses, num_intermediate=5):
+    """
+    Interpolate vertices and cameras between pairs of frames by adding intermediate results.
+
+    :param poses: optimized poses, a list of tuples (model_output, camera_transform)
+    :param num_intermediate: number of intermediate results to insert between each pair of frames
+    :return: interpolated poses, a list of tuples (body_pose, camera_pose)
+    """
+    # nothing to interpolate with fewer than two frames
+    if len(poses) < 2:
+        return poses
+
+    new_poses = []
+    for i in range(len(poses) - 1):
+        # Shape of one matrix of vertices = torch.Size([1, 10475, 3])
+        pose_1 = poses[i][0].vertices.detach().cpu().numpy()
+        pose_2 = poses[i + 1][0].vertices.detach().cpu().numpy()
+        poses_pair = np.concatenate((pose_1, pose_2), axis=0)
+
+        camera_1 = np.expand_dims(poses[i][1], axis=0)
+        camera_2 = np.expand_dims(poses[i + 1][1], axis=0)
+        camera_pair = np.concatenate((camera_1, camera_2), axis=0)
+
+        x = np.arange(poses_pair.shape[0])
+        f1 = interpolate.interp1d(x, poses_pair, axis=0)
+        f2 = interpolate.interp1d(x, camera_pair, axis=0)
+
+        evenly_spaced_points = np.linspace(
+            x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)
+
+        new_frames = f1(evenly_spaced_points)
+        new_cameras = f2(evenly_spaced_points)
+
+        arr = [(new_frames[j], new_cameras[j])
+               for j in range(new_frames.shape[0])]
+        if i > 0:
+            # drop the leading frame: it was already added by the previous pair
+            arr.pop(0)
+        new_poses += arr
+
+    return new_poses
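The sample count (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1 passed to np.linspace yields both key frames plus num_intermediate evenly spaced blends per gap: for one pair of frames and the default num_intermediate=5, that is (2 - 1) * (5 + 1) + 1 = 7 samples. A small self-contained check of the mechanics on toy data (not project code):

import numpy as np
from scipy import interpolate

num_intermediate = 5
# two key frames of 4 "vertices" each, stacked on axis 0: shape (2, 4, 3)
poses_pair = np.stack([np.zeros((4, 3)), np.ones((4, 3))])

x = np.arange(poses_pair.shape[0])                 # [0, 1]
f1 = interpolate.interp1d(x, poses_pair, axis=0)   # linear by default

n_samples = (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1  # = 7
new_frames = f1(np.linspace(x[0], x[-1], n_samples))

assert new_frames.shape == (7, 4, 3)
assert np.allclose(new_frames[3], 0.5)  # midpoint is the halfway blend

Because each pair's samples include both endpoints, every iteration after the first drops its leading frame (the arr.pop(0) above) so shared key frames are not duplicated.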