# ang2joint.py
import torch


def ang2joint(p3d0, pose,
              parent={0: -1, 1: 0, 2: 0, 3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7, 11: 8, 12: 9, 13: 9, 14: 9,
                      15: 12, 16: 13, 17: 14, 18: 16, 19: 17, 20: 18, 21: 19, 22: 20, 23: 21}):
    """
    Convert per-joint axis-angle rotations into 3D joint positions via forward kinematics.

    :param p3d0: rest-pose joint positions, [batch_size, joint_num, 3]
    :param pose: axis-angle rotation per joint, [batch_size, joint_num, 3]
    :param parent: dict mapping each joint index to its parent joint index (-1 for the root)
    :return: posed joint positions, [batch_size, joint_num, 3]
    """
    # model_path = './model.npz'
    # params = np.load(model_path, allow_pickle=True)
    # kintree_table = params['kintree_table']
    batch_num = p3d0.shape[0]
    # id_to_col = {kintree_table[1, i]: i
    #              for i in range(kintree_table.shape[1])}
    # parent = {
    #     i: id_to_col[kintree_table[0, i]]
    #     for i in range(1, kintree_table.shape[1])
    # }
    # parent = {1: 0, 2: 0, 3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7, 11: 8, 12: 9, 13: 9, 14: 9, 15: 12, 16: 13,
    #           17: 14, 18: 16, 19: 17, 20: 18, 21: 19, 22: 20, 23: 21}
    jnum = len(parent.keys())  # number of joints in the kinematic tree
    # v_shaped = torch.tensordot(betas, self.shapedirs, dims=([1], [2])) + self.v_template
    # J = torch.matmul(self.J_regressor, v_shaped)
    # face_J = v_shaped[:, [333, 2801, 6261], :]
    J = p3d0  # rest-pose joint locations
    # Axis-angle -> rotation matrix for every joint: [batch_size, joint_num, 3, 3]
    R_cube_big = rodrigues(pose.contiguous().view(-1, 1, 3)).reshape(batch_num, -1, 3, 3)
    results = []
    # Root joint: build its 4x4 world transform from its rotation and rest position.
    results.append(
        with_zeros(torch.cat((R_cube_big[:, 0], torch.reshape(J[:, 0, :], (-1, 3, 1))), dim=2))
    )
    # for i in range(1, kintree_table.shape[1]):
    for i in range(1, jnum):
        # Chain each joint's local transform (its rotation plus the rest-pose offset
        # from its parent) onto the parent's accumulated world transform.
        results.append(
            torch.matmul(
                results[parent[i]],
                with_zeros(
                    torch.cat(
                        (R_cube_big[:, i], torch.reshape(J[:, i, :] - J[:, parent[i], :], (-1, 3, 1))),
                        dim=2
                    )
                )
            )
        )
    # Stack the per-joint 4x4 transforms and read off the translation column as the joint positions.
    stacked = torch.stack(results, dim=1)
    J_transformed = stacked[:, :, :3, 3]
    return J_transformed


def rodrigues(r):
    """
    Rodrigues' rotation formula that turns an axis-angle tensor into rotation
    matrices in a batched manner.

    Parameter:
    ----------
    r: Axis-angle rotation tensor of shape [batch_size * angle_num, 1, 3].

    Return:
    -------
    Rotation matrix of shape [batch_size * angle_num, 3, 3].
    """
    # Tiny random epsilon keeps the norm (and the division below) away from zero.
    eps = r.clone().normal_(std=1e-8)
    theta = torch.norm(r + eps, dim=(1, 2), keepdim=True)
    # theta = torch.norm(r, dim=(1, 2), keepdim=True)  # dim cannot be tuple
    theta_dim = theta.shape[0]
    r_hat = r / theta  # unit rotation axis
    cos = torch.cos(theta)
    # Skew-symmetric cross-product matrix [r_hat]_x, assembled row by row.
    z_stick = torch.zeros(theta_dim, dtype=torch.float).to(r.device)
    m = torch.stack(
        (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1],
         r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0],
         -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), dim=1)
    m = torch.reshape(m, (-1, 3, 3))
    # Batched 3x3 identity.
    i_cube = (torch.eye(3, dtype=torch.float).unsqueeze(dim=0)
              + torch.zeros((theta_dim, 3, 3), dtype=torch.float)).to(r.device)
    A = r_hat.permute(0, 2, 1)
    dot = torch.matmul(A, r_hat)  # outer product r_hat r_hat^T
    # R = cos(theta) I + (1 - cos(theta)) r r^T + sin(theta) [r]_x
    R = cos * i_cube + (1 - cos) * dot + torch.sin(theta) * m
    return R


def with_zeros(x):
    """
    Append a [0, 0, 0, 1] row to a batch of [3, 4] transform tensors.

    Parameter:
    ---------
    x: Tensor of shape [batch_size, 3, 4] to be appended.

    Return:
    ------
    Tensor of shape [batch_size, 4, 4] after appending.
    """
    ones = torch.tensor(
        [[[0.0, 0.0, 0.0, 1.0]]], dtype=torch.float
    ).expand(x.shape[0], -1, -1).to(x.device)
    ret = torch.cat((x, ones), dim=1)
    return ret


def pack(x):
    """
    Prepend zero tensors of shape [4, 3] to a batch of [4, 1] column tensors,
    producing [4, 4] matrices whose last column is x.

    Parameter:
    ----------
    x: A tensor of shape [batch_size, joint_num, 4, 1]

    Return:
    ------
    A tensor of shape [batch_size, joint_num, 4, 4] after appending.
    """
    zeros43 = torch.zeros(
        (x.shape[0], x.shape[1], 4, 3), dtype=torch.float).to(x.device)
    ret = torch.cat((zeros43, x), dim=3)
    return ret
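

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original file: the batch size and the
    # random rest pose below are illustrative assumptions, not real skeleton data.
    batch_size, joint_num = 2, 24                  # 24 joints to match the default parent map
    p3d0 = torch.rand(batch_size, joint_num, 3)    # dummy rest-pose joint positions
    pose = torch.zeros(batch_size, joint_num, 3)   # zero axis-angle rotation at every joint
    joints = ang2joint(p3d0, pose)
    print(joints.shape)                            # torch.Size([2, 24, 3])
    # With zero rotations the forward kinematics should (numerically) reproduce the rest pose.
    print(torch.allclose(joints, p3d0, atol=1e-4))  # expected: True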