Commit d1cdece896 by Wlad, 2021-02-08 15:33:27 +01:00 (parent 809f60bfae)
12 changed files with 223 additions and 181 deletions

animate.py (new file, +18)

@@ -0,0 +1,18 @@
import pickle
import time
from tqdm import tqdm
from utils.render import make_video
import torch
from tqdm.auto import trange
from dataset import SMPLyDataset
from model import *
from utils.general import *
from renderer import *
from camera_estimation import TorchCameraEstimate
from modules.camera import SimpleCamera
from train_pose import train_pose_with_conf
from utils.general import rename_files, get_new_filename
def animate_with_conf(config, start=0, end=None):
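Only the imports and the signature of animate_with_conf are visible in this view; a hypothetical usage sketch (load_config comes from utils.general as imported above; the start/end semantics are assumed from the signature, since the body is truncated here):

config = load_config()
animate_with_conf(config, start=0, end=50)  # assumed: optimizes and animates frames 0..50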

camera_estimation.py

@@ -19,7 +19,6 @@ class CameraEstimate:
     def __init__(
         self,
         model: smplx.SMPL,
-        dataset,
         keypoints,
         renderer,
         image_path=None,
@@ -31,7 +30,6 @@ class CameraEstimate:
         self.use_progress_bar = use_progress_bar
         self.verbose = verbose
         self.model = model
-        self.dataset = dataset
         self.output_model = model(return_verts=True)
         self.renderer = renderer
         self.dtype = dtype

(file name not shown)

@@ -25,7 +25,6 @@ init_keypoints, init_joints, keypoints, conf, est_scale, r, img_path = setup_tra
 camera = TorchCameraEstimate(
     model,
-    dataset=dataset,
     keypoints=keypoints,
     renderer=Renderer(),
     device=device,

(file name not shown)

@@ -1,14 +1,9 @@
 # library imports
-from utils.render import make_video
-import torch
+from train import optimize_sample
 import matplotlib.pyplot as plt
 # local imports
-from train_pose import train_pose_with_conf
-from modules.camera import SimpleCamera
-from model import SMPLyModel
-from utils.general import getfilename_from_conf, load_config, setup_training
-from camera_estimation import TorchCameraEstimate
+from utils.general import load_config
 from dataset import SMPLyDataset

 # load and select sample
@@ -16,50 +11,14 @@ config = load_config()
 dataset = SMPLyDataset.from_config(config=config)
 sample_index = 0

-# prepare data and SMPL model
-model = SMPLyModel.model_from_conf(config)
-init_keypoints, init_joints, keypoints, conf, est_scale, r, img_path = setup_training(
-    model=model,
-    renderer=True,
-    dataset=dataset,
-    sample_index=sample_index,
-    offscreen=True
-)
-
-# configure PyTorch device and format
-dtype = torch.float32
-device = torch.device('cpu')
-
-camera = TorchCameraEstimate(
-    model,
-    dataset=dataset,
-    keypoints=keypoints,
-    renderer=r,
-    device=device,
-    dtype=dtype,
-    image_path=img_path,
-    est_scale=est_scale
-)
-
-# render camera to the scene
-camera.setup_visualization(r.init_keypoints, r.keypoints)
-
 # train for pose
-result, best, train_loss, step_imgs = train_pose_with_conf(
-    config=config,
-    model=model,
-    keypoints=keypoints,
-    keypoint_conf=conf,
-    camera=camera,
-    renderer=r,
-    device=device,
-)
-make_video(step_imgs, "test.avi")
+pose, train_loss, step_imgs = optimize_sample(
+    sample_index,
+    dataset,
+    config
+)

 # color = r.get_snapshot()
 # plt.imshow(color)
 # plt.show()
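One caveat with the simplified script: optimize_sample, as added in train.py later in this commit, returns four values (pose, camera transform, loss history, step images), so the three-name unpacking above would raise a ValueError at runtime; a call matching the new signature would read:

pose, cam_trans, train_loss, step_imgs = optimize_sample(
    sample_index,
    dataset,
    config
)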

(file name not shown)

@@ -1,27 +1,31 @@
 import pickle
 import time
-from utils.render import make_video
-from tqdm import tqdm
+from train import create_animation
+from utils.video import make_video, video_from_pkl
 import torch
-from tqdm.auto import trange
 from dataset import SMPLyDataset
 from model import *
 from utils.general import *
 from renderer import *
-from camera_estimation import TorchCameraEstimate
-from modules.camera import SimpleCamera
-from train_pose import train_pose_with_conf
 from utils.general import rename_files, get_new_filename

 START_IDX = 0  # starting index of the frame to optimize for
-FINISH_IDX = 50  # choose a big number to optimize for all frames in samples directory
+FINISH_IDX = 2  # choose a big number to optimize for all frames in samples directory

 # if False, only run already saved animation without optimization
-RUN_OPTIMIZATION = True
+RUN_OPTIMIZATION = False

-final_poses = []  # optimized poses array that is saved for playing the animation
 result_image = []
 idx = START_IDX
+
+device = torch.device('cpu')
+dtype = torch.float32
+config = load_config()
+dataset = SMPLyDataset.from_config(config)
+model = SMPLyModel.model_from_conf(config)

 def get_next_frame(idx):
     """
@@ -37,86 +41,41 @@ def get_next_frame(idx):
     return keypoints, keypoints_conf, image_path

-device = torch.device('cpu')
-dtype = torch.float32
-config = load_config()
-dataset = SMPLyDataset.from_config(config)
-model = SMPLyModel.model_from_conf(config)
-samples_dir = config['data']['rootDir']

 # Rename files in samples directory to uniform format
 if config['data']['renameFiles']:
-    rename_files(samples_dir + "/")
+    rename_files(config['data']['rootDir'] + "/")

-results_dir = config['output']['rootDir']
-result_prefix = config['output']['prefix']
-model_out = model()
-joints = model_out.joints.detach().cpu().numpy().squeeze()

 '''
 Optimization part without visualization
 '''
 if RUN_OPTIMIZATION:
-    for idx in trange(FINISH_IDX, desc='Optimizing'):
-        idx = START_IDX + idx
-        init_keypoints, init_joints, keypoints, conf, est_scale, r, img_path = setup_training(
-            model=model,
-            renderer=True,
-            offscreen=True,
-            dataset=dataset,
-            sample_index=idx
-        )
-        r.start()
-        cam = TorchCameraEstimate(
-            model,
-            dataset=dataset,
-            keypoints=keypoints,
-            renderer=None,
-            device=torch.device('cpu'),
-            dtype=torch.float32,
-            image_path=img_path,
-            est_scale=est_scale,
-            use_progress_bar=False,
-            verbose=False
-        )
-        # print("\nCamera optimization of frame", idx, "is finished.")
-        cur_pose, final_pose, loss, frames = train_pose_with_conf(
-            config=config,
-            model=model,
-            keypoints=keypoints,
-            keypoint_conf=conf,
-            camera=cam,
-            renderer=r,
-            device=device,
-            use_progress_bar=False
-        )
-        camera_transformation, camera_int, camera_params = cam.get_results()
-        # print("\nPose optimization of frame", idx, "is finished.")
-        R = camera_transformation.numpy().squeeze()
-        idx += 1
-        # append optimized pose and camera transformation to the array
-        final_poses.append((final_pose, R))
-    print("Optimization of", idx, "frames finished")
-
-'''
-Save final_poses array into results folder as a pickle dump
-'''
-filename = results_dir + get_new_filename()
-print("Saving results to", filename)
-with open(filename, "wb") as fp:
-    pickle.dump(final_poses, fp)
-print("Results have been saved to", filename)
+    final_poses, filename = create_animation(
+        dataset,
+        config,
+        START_IDX,
+        FINISH_IDX,
+        offscreen=True,
+        save_to_file=True
+    )
+
+def save_to_video(poses, video_name, config, fps=30):
+    r = DefaultRenderer(
+        offscreen=True
+    )
+    r.start()
+
+    model_anim = SMPLyModel.model_from_conf(config)
+    frames = []
+
+    for body_pose, cam_trans in tqdm(poses):
+        r.render_model_with_tfs(model_anim, body_pose, keep_pose=True,
+                                render_joints=False, transforms=cam_trans)
+        frames.append(r.get_snapshot())
+
+    make_video(frames, video_name, fps)

 # TODO: use current body pose and camera transform for next optimization?

@@ -150,37 +109,15 @@ def replay_animation(file, start_frame=0, end_frame=None, with_background=False,
         time.sleep(1 / fps)

-def video_from_pkl(filename, video_name):
-    with open(filename, "rb") as fp:
-        final_poses = pickle.load(fp)
-    save_to_video(final_poses, video_name)
-
-def save_to_video(poses, video_name, fps=30):
-    r = DefaultRenderer(
-        offscreen=True
-    )
-    r.start()
-
-    model_anim = SMPLyModel.model_from_conf(config)
-    frames = []
-
-    for body_pose, cam_trans in tqdm(poses):
-        r.render_model_with_tfs(model_anim, body_pose, keep_pose=True,
-                                render_joints=False, transforms=cam_trans)
-        frames.append(r.get_snapshot())
-
-    make_video(frames, video_name, fps)
-
 '''
 Play the animation.
 '''
-anim_file = results_dir + result_prefix + "0.pkl"
 if RUN_OPTIMIZATION:
     anim_file = filename
+else:
+    results_dir = config['output']['rootDir']
+    result_prefix = config['output']['prefix']
+    anim_file = results_dir + result_prefix + "3.pkl"

-video_from_pkl(anim_file, "test-anim.avi")
+video_from_pkl(anim_file, "test-anim.avi", config)
 replay_animation(anim_file)
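For a playback-only run (RUN_OPTIMIZATION = False), the same flow can be exercised standalone; a minimal sketch, assuming a dump named <prefix>0.pkl exists in the configured output directory and that replay_animation exposes the fps used by its sleep loop:

config = load_config()
anim_file = config['output']['rootDir'] + config['output']['prefix'] + "0.pkl"
video_from_pkl(anim_file, "replay.avi", config)   # write the dump to a video file
replay_animation(anim_file, start_frame=0)        # or step through it interactively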

(file name not shown)

@@ -40,5 +40,4 @@ class AnglePriorsLoss(nn.Module):
         angles = pose[:, self.angle_idx]
         # compute cost based on exponential of angle * direction
-        # then use MSE for cost
         return torch.exp(angles * self.angle_directions).pow(2).sum()
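The prior grows quickly once a tracked angle bends toward the sign marked by its direction entry: exp(angle * direction) exceeds 1 exactly when angle and direction share a sign, and squaring amplifies the violation. A quick numeric sketch with hypothetical values (three tracked joints, all directions +1):

import torch

angles = torch.tensor([[-0.5, 0.2, 1.0]])    # stand-in for pose[:, angle_idx]
directions = torch.tensor([1.0, 1.0, 1.0])   # hypothetical angle_directions buffer
cost = torch.exp(angles * directions).pow(2).sum()
# exp(-0.5)^2 + exp(0.2)^2 + exp(1.0)^2 ≈ 0.37 + 1.49 + 7.39 ≈ 9.25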

(file name not shown)

@@ -10,7 +10,7 @@ class AngleClipper(nn.Module):
             dtype=torch.float32,
             angle_idx=[24, 10, 9],
             # directions=[-1, 1, 1, 1],
-            weights=[1.0, 1.0, 1.0]
+            weight=0.01
     ):
         super(AngleClipper, self).__init__()
@@ -28,8 +28,8 @@ class AngleClipper(nn.Module):
         # create buffer for weights
         self.register_buffer(
-            "weights",
-            torch.tensor(weights, dtype=dtype).to(device=device)
+            "weight",
+            torch.tensor(weight, dtype=dtype).to(device=device)
         )

     def forward(self, pose):
@@ -39,4 +39,4 @@ class AngleClipper(nn.Module):
         penalty = angles[torch.abs(angles) > self.limit]
         # get relevant angles
-        return penalty.pow(2).sum() * 0.01
+        return penalty.pow(2).sum() * self.weight
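The clipping penalty's scale, previously the hard-coded 0.01, is now a constructor argument stored as a buffer, so it follows the module's device and can be tuned per experiment. A sketch, assuming the remaining constructor defaults shown above:

clipper = AngleClipper(weight=0.05)  # stiffer than the 0.01 default
loss = clipper(pose)                 # penalty.pow(2).sum() * 0.05, pose being the optimizer's pose tensor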

renderer.py

@@ -316,6 +316,10 @@ class Renderer:
         return color

+    def wait_for_close(self):
+        while self.viewer.is_active:
+            pass
+
 class DefaultRenderer(Renderer):
     """Utility class for easier default renderer setup

train.py (new file, +101)

@@ -0,0 +1,101 @@
# library imports
import pickle
import torch
from utils.video import make_video
from tqdm.auto import trange
# local imports
from train_pose import train_pose_with_conf
from model import SMPLyModel
from utils.general import get_new_filename, setup_training
from camera_estimation import TorchCameraEstimate


def optimize_sample(sample_index, dataset, config, device=torch.device('cpu'), dtype=torch.float32, offscreen=False, verbose=False, display_result=False):
    # prepare data and SMPL model
    model = SMPLyModel.model_from_conf(config)
    init_keypoints, init_joints, keypoints, conf, est_scale, r, img_path = setup_training(
        model=model,
        renderer=True,
        dataset=dataset,
        sample_index=sample_index,
        offscreen=offscreen
    )

    camera = TorchCameraEstimate(
        model,
        keypoints=keypoints,
        renderer=r,
        device=device,
        dtype=dtype,
        image_path=img_path,
        est_scale=est_scale,
        verbose=verbose,
        use_progress_bar=verbose
    )
    camera_transformation, camera_int, camera_params = camera.get_results()

    if not offscreen:
        # render camera to the scene
        camera.setup_visualization(r.init_keypoints, r.keypoints)

    # train for pose
    pose, loss_history, step_imgs = train_pose_with_conf(
        config=config,
        model=model,
        keypoints=keypoints,
        keypoint_conf=conf,
        camera=camera,
        renderer=r,
        device=device,
        use_progress_bar=verbose
    )

    if display_result:
        r.wait_for_close()

    return pose, camera_transformation, loss_history, step_imgs


def create_animation(dataset, config, start_idx=0, end_idx=None, device=torch.device('cpu'), dtype=torch.float32, offscreen=False, verbose=False, save_to_file=False):
    final_poses = []

    if end_idx is None:
        end_idx = len(dataset)

    for idx in trange(end_idx - start_idx, desc='Optimizing'):
        idx = start_idx + idx
        final_pose, cam_trans, train_loss, step_imgs = optimize_sample(
            idx,
            dataset,
            config,
            offscreen=True
        )
        if verbose:
            print("Optimization of", idx, "frames finished")
            # print("\nPose optimization of frame", idx, "is finished.")

        R = cam_trans.numpy().squeeze()
        idx += 1
        # append optimized pose and camera transformation to the array
        final_poses.append((final_pose, R))

    filename = None
    if save_to_file:
        '''
        Save final_poses array into results folder as a pickle dump
        '''
        results_dir = config['output']['rootDir']
        result_prefix = config['output']['prefix']
        filename = results_dir + get_new_filename()
        print("Saving results to", filename)

        with open(filename, "wb") as fp:
            pickle.dump(final_poses, fp)
        print("Results have been saved to", filename)

    return final_poses, filename
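Taken together, optimize_sample fits a single frame end-to-end (camera estimate, then pose optimization) and create_animation loops it over a frame range; a minimal driver sketch using only what this file and the existing modules export:

from utils.general import load_config
from dataset import SMPLyDataset
from train import optimize_sample, create_animation

config = load_config()
dataset = SMPLyDataset.from_config(config)

# fit one frame offscreen
pose, cam_trans, loss_history, step_imgs = optimize_sample(
    0, dataset, config, offscreen=True)

# fit frames 0..2 and pickle the (pose, camera transform) pairs
final_poses, filename = create_animation(
    dataset, config, start_idx=0, end_idx=2, save_to_file=True)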

train_pose.py

@@ -53,7 +53,6 @@ def train_pose(
     # renderer options
     renderer: Renderer = None,
     render_steps=True,
-    render_offscreen=True,
     vposer=None,
@@ -209,14 +208,14 @@ def train_pose(
                 renderer.render_model_with_tfs(
                     model, pose_layer.cur_out, keep_pose=True, transforms=R)
-                if render_offscreen:
+                if renderer.use_offscreen:
                     offscreen_step_output.append(renderer.get_snapshot())
                 # renderer.set_group_pose("body", R)
     if use_progress_bar:
         pbar.close()
         print("Final result:", loss.item())
-    return pose_layer.cur_out, best_pose, loss_history, offscreen_step_output
+    return best_pose, loss_history, offscreen_step_output

 def train_pose_with_conf(
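With the first element dropped from the return tuple, callers now unpack three values; train.py consumes train_pose_with_conf the same way (a sketch of the new contract, kwargs as at the call site in train.py):

best_pose, loss_history, step_imgs = train_pose_with_conf(
    config=config,
    model=model,
    keypoints=keypoints,
    keypoint_conf=conf,
    camera=camera,
    renderer=r,
    device=device
)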

utils/render.py

@@ -1,28 +1,8 @@
 from typing import List, Set, Dict, Tuple, Optional
 import numpy as np
 import trimesh
 import pyrender
-import cv2
-from tqdm import tqdm
-
-def make_video(images, video_name: str, fps=5):
-    images = np.array(images)
-    width = images.shape[2]
-    height = images.shape[1]
-    video = cv2.VideoWriter(
-        video_name, 0, fps, (width, height), True)
-    print("creating video with size", width, height)
-    for idx in tqdm(range(len(images))):
-        img = images[idx]
-        im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-        video.write(im_rgb)
-    video.release()
-
 def render_model(

utils/video.py (new file, +48)

@@ -0,0 +1,48 @@
import pickle
from model import SMPLyModel
from renderer import DefaultRenderer
import cv2
from tqdm import tqdm
import numpy as np


def make_video(images, video_name: str, fps=5):
    images = np.array(images)
    width = images.shape[2]
    height = images.shape[1]
    video = cv2.VideoWriter(
        video_name, 0, fps, (width, height), True)
    print("creating video with size", width, height)

    for idx in tqdm(range(len(images))):
        img = images[idx]
        im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        video.write(im_rgb)
    video.release()


def video_from_pkl(filename, video_name, config):
    with open(filename, "rb") as fp:
        final_poses = pickle.load(fp)
    save_to_video(final_poses, video_name, config)


def save_to_video(poses, video_name, config, fps=30):
    r = DefaultRenderer(
        offscreen=True
    )
    r.start()

    model_anim = SMPLyModel.model_from_conf(config)
    frames = []

    for body_pose, cam_trans in tqdm(poses):
        r.render_model_with_tfs(model_anim, body_pose, keep_pose=True,
                                render_joints=False, transforms=cam_trans)
        frames.append(r.get_snapshot())

    make_video(frames, video_name, fps)
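End to end, the new module turns a pickled (pose, transform) dump into a video; a minimal sketch, assuming a dump written by create_animation exists at the configured output path:

from utils.general import load_config
from utils.video import video_from_pkl

config = load_config()
anim_file = config['output']['rootDir'] + config['output']['prefix'] + "0.pkl"
video_from_pkl(anim_file, "animation.avi", config)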