Mirror of https://github.com/gosticks/body-pose-animation.git (synced 2025-10-16 11:45:42 +00:00)

Commit f0b2015f53: WIP: scale keypoints to match
Parent: 8018877fa6
@@ -124,7 +124,7 @@ class CameraEstimate:
         init_points_3d = torch.from_numpy(init_points_3d)

         params = [translation, rotation]
-        opt = torch.optim.Adam(params, lr=0.1)
+        opt = torch.optim.Adam(params, lr=0.001)

         def C(params, X):
             translation = params[0]
@@ -146,7 +146,7 @@ class CameraEstimate:
             loss.float()
             loss.backward()
             opt.step()
-            stop = loss > 3e-4
+            stop = loss > 3e-2
             current_pose = self.torch_params_to_pose(params)
             current_pose = current_pose.detach().numpy()
             self.renderer.scene.set_pose(self.transformed_points, current_pose)
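For context, a minimal sketch of the kind of Adam-based camera fit these two hunks retune (learning rate 0.1 -> 0.001, stopping threshold 3e-4 -> 3e-2). The cost function C(params, X) and the loop structure are not visible in the diff, so everything here beyond those two values is an assumption:

    import torch

    # Stand-ins for the torso joints and the fitting target; the real cost
    # C(params, X) is defined outside this hunk.
    init_points_3d = torch.randn(4, 3)
    target_points = init_points_3d * 1.3 + 0.1

    translation = torch.zeros(3, requires_grad=True)
    rotation = torch.eye(3, requires_grad=True)    # simplification: unconstrained matrix

    params = [translation, rotation]
    opt = torch.optim.Adam(params, lr=0.001)       # learning rate lowered to 0.001 here

    for _ in range(2000):
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(init_points_3d @ rotation + translation,
                                            target_points)
        loss.backward()
        opt.step()
        if loss.item() < 3e-2:                     # threshold value changed to 3e-2 here
            break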
@@ -12,7 +12,7 @@ from model import *
 import time
 # from renderer import *
 from dataset import *
-from utils import get_named_joints, estimate_depth
+from utils import get_named_joints, estimate_scale

 ascii_logo = """\
  /$$$$$$ /$$ /$$ /$$$$$$$ /$$ /$$ /$$
@@ -66,10 +66,10 @@ joints = model_out.joints.detach().cpu().numpy().squeeze()
 # ---------------------------------
 cam_est_joints_names = ["hip-left", "hip-right",
                         "shoulder-left", "shoulder-right"]
-est_depth = estimate_depth(joints, keypoints)
+est_scale = estimate_scale(joints, keypoints)

-# apply depth to keypoints
-keypoints[:, 2] = -est_depth
+# apply scaling to keypoints
+keypoints = keypoints * est_scale

 init_joints = get_named_joints(joints, cam_est_joints_names)
 init_keypoints = get_named_joints(keypoints, cam_est_joints_names)
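The replaced lines switch from writing a single estimated depth into the z column to scaling every keypoint coordinate uniformly. A small standalone illustration of the two operations, with made-up shapes and values:

    import numpy as np

    keypoints = np.random.rand(25, 3)   # assumed OpenPose-style (N, 3) array

    # old behaviour: push every keypoint to one fixed estimated depth
    est_depth = 2.5                     # made-up value
    keypoints_old = keypoints.copy()
    keypoints_old[:, 2] = -est_depth

    # new behaviour: scale x, y and z uniformly so limb lengths match the model
    est_scale = 1.8                     # made-up value
    keypoints_new = keypoints * est_scale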
@@ -108,7 +108,7 @@ keyp_torso = torch.Tensor(init_keypoints, device=device)

 learning_rate = 1e-3
 trans = Transform(dtype, device)
-proj = CameraProjSimple(dtype, device, -est_depth)
+proj = CameraProjSimple(dtype, device, 1)
 optimizer = torch.optim.Adam(trans.parameters(), lr=learning_rate)
 loss_layer = torch.nn.MSELoss()

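Transform and CameraProjSimple are the repository's own modules and their definitions are not part of this diff. The loop below only illustrates the wiring suggested by the setup above (transform the joints, project them, compare against the 2D keypoints with MSELoss, step Adam on the transform parameters), using toy stand-ins:

    import torch

    # Toy stand-ins; the real Transform and CameraProjSimple live in the repository.
    trans = torch.nn.Linear(3, 3)                  # plays the role of Transform

    def proj(pts):                                 # toy orthographic projection
        return pts[:, :2]

    joints_torso = torch.randn(4, 3)               # assumed torso joints
    keyp_torso = torch.randn(4, 2)                 # assumed 2D torso keypoints

    learning_rate = 1e-3
    optimizer = torch.optim.Adam(trans.parameters(), lr=learning_rate)
    loss_layer = torch.nn.MSELoss()

    for step in range(200):
        optimizer.zero_grad()
        loss = loss_layer(proj(trans(joints_torso)), keyp_torso)
        loss.backward()
        optimizer.step()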
@@ -66,5 +66,5 @@ class Transform(nn.Module):
         return transform

     def forward(self, joints):
-        R = self.get_transform_mat().squeeze()
+        R = self.get_transform_mat()
         return joints @ R + F.pad(self.translation, (0,1), value=1)
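The returned expression pads the learned translation with a trailing 1, i.e. it appends a homogeneous coordinate. A minimal illustration of that padding, assuming the translation is a plain 3-vector:

    import torch
    import torch.nn.functional as F

    translation = torch.tensor([0.1, 0.2, 0.3])
    print(F.pad(translation, (0, 1), value=1))   # tensor([0.1000, 0.2000, 0.3000, 1.0000])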
@@ -18,8 +18,6 @@ class Renderer:
         if camera is None:
             camera = pyrender.OrthographicCamera(ymag=1, xmag=1)
-
-

         if camera_pose is None:
             camera_pose = np.eye(4)
             camera_pose[:3, :3] = R.from_rotvec(np.pi/2 * np.array([0, 0, 0])).as_matrix()
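Since the rotation vector is np.pi/2 * [0, 0, 0] = [0, 0, 0], the assigned rotation block is the identity, so this line leaves the camera pose unchanged. A quick check, assuming R is scipy.spatial.transform.Rotation as the call pattern suggests:

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    print(R.from_rotvec(np.pi / 2 * np.array([0, 0, 0])).as_matrix())
    # [[1. 0. 0.]
    #  [0. 1. 0.]
    #  [0. 0. 1.]]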
utils.py (8 lines changed)
@@ -130,8 +130,8 @@ def openpose_to_opengl_coords(

     points = np.array([
         [
-            x / real_width * 2 - 1,
-            -y / real_height * 2 + 1,
+            x / real_width * 5 - 1,
+            -y / real_height * 5 + 1,
             0
         ] for (x, y, z) in input_data])

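With the factor 2, a pixel coordinate in [0, real_width] maps to the OpenGL-style range [-1, 1]; with the factor 5 it maps to [-1, 4], so the keypoints end up scaled and shifted off-center rather than normalized. A quick numeric check with an assumed 640-pixel-wide image:

    real_width = 640
    for x in (0, 320, 640):
        print(x / real_width * 2 - 1, x / real_width * 5 - 1)
    # -1.0 -1.0
    # 0.0 1.5
    # 1.0 4.0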
@@ -176,11 +176,11 @@ def render_points(scene, points, radius=0.005, color=[0.0, 0.0, 1.0, 1.0], name=
     tfs = np.tile(np.eye(4), (len(points), 1, 1))
     tfs[:, :3, 3] = points
     pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
-    # return the render scene node
+    # return the render scsene node
     return scene.add(pcl, name=name)


-def estimate_depth(joints, keypoints, pairs=[
+def estimate_scale(joints, keypoints, pairs=[
    ("shoulder-right", "hip-right"),
    ("shoulder-left", "hip-left")
 ], cam_fy=1):
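The body of the renamed function is outside this hunk, so the sketch below shows only one plausible reading of the rename: estimating a single scale factor from the ratio of model torso-segment lengths to keypoint torso-segment lengths. The joint-index mapping and the formula are assumptions, not the repository's implementation:

    import numpy as np

    JOINT_IDS = {"shoulder-right": 17, "shoulder-left": 16,
                 "hip-right": 2, "hip-left": 1}   # assumed index mapping

    def estimate_scale_sketch(joints, keypoints, pairs=(
            ("shoulder-right", "hip-right"),
            ("shoulder-left", "hip-left")), cam_fy=1):
        # Hypothetical: average the per-pair length ratios into one scale factor.
        ratios = []
        for a, b in pairs:
            model_len = np.linalg.norm(joints[JOINT_IDS[a]] - joints[JOINT_IDS[b]])
            keyp_len = np.linalg.norm(keypoints[JOINT_IDS[a]] - keypoints[JOINT_IDS[b]])
            ratios.append(model_len / keyp_len)
        return cam_fy * float(np.mean(ratios))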