Skip to content

Commit 8fbb1dc

Browse files
committed
Update test code args parser
1 parent fda71b1 commit 8fbb1dc

File tree

4 files changed

+36
-35
lines changed

4 files changed

+36
-35
lines changed

README.md

+3-3
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ Our method creates both non-photorealistic cartoon animations (top) and natural
3030
- Python environment 3.6
3131
```
3232
conda create -n makeittalk_env python=3.6
33-
conda activate makeittalk
33+
conda activate makeittalk_env
3434
```
3535
- ffmpeg (https://ffmpeg.org/download.html)
3636
```
@@ -55,7 +55,7 @@ Download the following pre-trained models to `examples/ckpt` folder.
5555

5656
## Animate Your Portraits!
5757

58-
`Nature human faces / Paintings` (warping through Image-to-image translation module)
58+
### _Natural human faces / Paintings_ (warping through Image-to-image translation module)
5959

6060
- crop your portrait image into size `256x256` and put it under `examples` folder with `.jpg` format.
6161
Make sure the head is almost in the middle (check existing examples for a reference).
@@ -73,7 +73,7 @@ to amply lip motion (in x/y-axis direction) and head motion displacements, defau
7373

7474

7575

76-
`Non-photorealistic cartoon faces` (warping through Delaunay triangulation)
76+
### _Non-photorealistic cartoon faces_ (warping through Delaunay triangulation)
7777

7878
- animate one of the existing puppets
7979

main_end2end.py

+7-6
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232

3333

3434
parser = argparse.ArgumentParser()
35-
parser.add_argument('--jpg', type=str, default='examples/{}.jpg'.format(default_head_name))
35+
parser.add_argument('--jpg', type=str, default='{}.jpg'.format(default_head_name))
3636
parser.add_argument('--close_input_face_mouth', default=CLOSE_INPUT_FACE_MOUTH, action='store_true')
3737

3838

@@ -44,8 +44,8 @@
4444
parser.add_argument('--amp_lip_x', type=float, default=2.)
4545
parser.add_argument('--amp_lip_y', type=float, default=2.)
4646
parser.add_argument('--amp_pos', type=float, default=1.)
47-
parser.add_argument('--reuse_train_emb_list', type=list, default=[]) # ['iWeklsXc0H8']) #['45hn7-LXDX8']) #['E_kmpT-EfOg']) #'iWeklsXc0H8', '29k8RtSUjE0', '45hn7-LXDX8',
48-
47+
parser.add_argument('--reuse_train_emb_list', type=str, nargs='+', default=[]) # ['iWeklsXc0H8']) #['45hn7-LXDX8']) #['E_kmpT-EfOg']) #'iWeklsXc0H8', '29k8RtSUjE0', '45hn7-LXDX8',
48+
# --reuse_train_emb_list 45hn7-LXDX8
4949

5050
parser.add_argument('--add_audio_in', default=False, action='store_true')
5151
parser.add_argument('--comb_fan_awing', default=False, action='store_true')
@@ -71,7 +71,7 @@
7171
opt_parser = parser.parse_args()
7272

7373
''' STEP 1: preprocess input single image '''
74-
img =cv2.imread(opt_parser.jpg)
74+
img =cv2.imread('examples/' + opt_parser.jpg)
7575
predictor = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cuda', flip_input=True)
7676
shapes = predictor.get_landmarks(img)
7777
if (not shapes or len(shapes) != 1):
@@ -83,7 +83,8 @@
8383
util.close_input_face_mouth(shape_3d)
8484

8585

86-
''' Additional manual adjustment to input face landmarks (slimmer lips and widers eyes) '''
86+
''' Additional manual adjustment to input face landmarks (slimmer lips and wider eyes) '''
87+
shape_3d[48:, 0] = (shape_3d[48:, 0] - np.mean(shape_3d[48:, 0])) * 0.95 + np.mean(shape_3d[48:, 0])
8788
shape_3d[49:54, 1] += 1.
8889
shape_3d[55:60, 1] -= 1.
8990
shape_3d[[37,38,43,44], 1] -=2
@@ -177,6 +178,6 @@
177178
''' STEP 6: Imag2image translation '''
178179
model = Image_translation_block(opt_parser, single_test=True)
179180
with torch.no_grad():
180-
model.single_test(jpg=img, fls=fl, filename=fls[i], prefix=opt_parser.jpg)
181+
model.single_test(jpg=img, fls=fl, filename=fls[i], prefix=opt_parser.jpg.split('.')[0])
181182
print('finish image2image gen')
182183
os.remove(os.path.join('examples', fls[i]))

main_end2end_cartoon.py

+25-25
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@
3737
parser.add_argument('--amp_lip_x', type=float, default=2.0)
3838
parser.add_argument('--amp_lip_y', type=float, default=2.0)
3939
parser.add_argument('--amp_pos', type=float, default=0.8)
40-
parser.add_argument('--reuse_train_emb_list', default=[]) # ['E_kmpT-EfOg']) # ['E_kmpT-EfOg']) # ['45hn7-LXDX8'])
40+
parser.add_argument('--reuse_train_emb_list', default=['45hn7-LXDX8']) # ['E_kmpT-EfOg']) # ['E_kmpT-EfOg']) # ['45hn7-LXDX8'])
4141

4242

4343
parser.add_argument('--add_audio_in', default=False, action='store_true')
@@ -177,27 +177,27 @@
177177

178178
os.remove(os.path.join('examples_cartoon', fls_names[i]))
179179

180-
# ==============================================
181-
# Step 4 : Vector art morphing (only work in WINDOWS)
182-
# ==============================================
183-
warp_exe = os.path.join(os.getcwd(), 'facewarp', 'facewarp.exe')
184-
import os
185-
186-
if (os.path.exists(os.path.join(output_dir, 'output'))):
187-
shutil.rmtree(os.path.join(output_dir, 'output'))
188-
os.mkdir(os.path.join(output_dir, 'output'))
189-
os.chdir('{}'.format(os.path.join(output_dir, 'output')))
190-
print(os.getcwd())
191-
192-
os.system('{} {} {} {} {} {}'.format(
193-
warp_exe,
194-
os.path.join('examples_cartoon', DEMO_CH+'.png'),
195-
os.path.join(output_dir, 'triangulation.txt'),
196-
os.path.join(output_dir, 'reference_points.txt'),
197-
os.path.join(output_dir, 'warped_points.txt'),
198-
# os.path.join(ROOT_DIR, 'puppets', sys.argv[6]),
199-
'-novsync -dump'))
200-
os.system('ffmpeg -y -r 62.5 -f image2 -i "%06d.tga" -i {} -shortest {}'.format(
201-
ain,
202-
os.path.join(output_dir, sys.argv[8])
203-
))
180+
# # ==============================================
181+
# # Step 4 : Vector art morphing (only work in WINDOWS)
182+
# # ==============================================
183+
# warp_exe = os.path.join(os.getcwd(), 'facewarp', 'facewarp.exe')
184+
# import os
185+
#
186+
# if (os.path.exists(os.path.join(output_dir, 'output'))):
187+
# shutil.rmtree(os.path.join(output_dir, 'output'))
188+
# os.mkdir(os.path.join(output_dir, 'output'))
189+
# os.chdir('{}'.format(os.path.join(output_dir, 'output')))
190+
# print(os.getcwd())
191+
#
192+
# os.system('{} {} {} {} {} {}'.format(
193+
# warp_exe,
194+
# os.path.join('examples_cartoon', DEMO_CH+'.png'),
195+
# os.path.join(output_dir, 'triangulation.txt'),
196+
# os.path.join(output_dir, 'reference_points.txt'),
197+
# os.path.join(output_dir, 'warped_points.txt'),
198+
# # os.path.join(ROOT_DIR, 'puppets', sys.argv[6]),
199+
# '-novsync -dump'))
200+
# os.system('ffmpeg -y -r 62.5 -f image2 -i "%06d.tga" -i {} -shortest {}'.format(
201+
# ain,
202+
# os.path.join(output_dir, sys.argv[8])
203+
# ))

src/approaches/train_audio2landmark.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ def __close_face_lip__(self, fl):
260260

261261
def test(self, au_emb=None):
262262
with torch.no_grad():
263-
self.__train_pass__(au_emb)
263+
self.__train_pass__(au_emb, vis_fls=True)
264264

265265
def __solve_inverse_lip2__(self, fl_dis_pred_pos_numpy):
266266
for j in range(fl_dis_pred_pos_numpy.shape[0]):

0 commit comments

Comments (0)