xuehongyang committed on
Commit
54a5078
•
1 Parent(s): 83d8d3c

fix

more

Files changed (7)
  1. Dockerfile +16 -0
  2. README.md +2 -2
  3. app.py +22 -25
  4. configs/train_config.py +4 -4
  5. data_process/model.py +0 -1
  6. data_process/resnet.py +0 -1
  7. server.sh +1 -0
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ FROM xuehy93/hififace:1.0
+
+
+ RUN apt update && apt install -y wget
+
+
+ WORKDIR /
+
+ RUN wget https://public.ph.files.1drv.com/y4m_El1_AyFLmGuZaPWOqkytzM4qYtDc3BvNNL99JV1OLCEkmD4RTQjtHEXZ0SAWb7UPLV1IPB0KO2rFlyGJaV_kITLbuAHzJ73GwR_cgvXpkIGywaTnKsKVV1jJe1LoFcl7XsxatyGpaC8-Gupq6jjBnaqSBH4dgfYAmzUk8Wqiiuj_ml2duU7No0M1T426y3RqOJsqVHXEMVfV0B6HjzQFKCCZIgfHjjHvLIB3B3xP8Q?AVOverride=1 -O checkpoints.tar.gz
+
+ RUN tar xfz checkpoints.tar.gz
+
+ WORKDIR /app
+ ADD ./ /app
+ RUN chmod +x ./server.sh
+ CMD ["./server.sh"]
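
Note (not part of the commit): the Dockerfile downloads checkpoints.tar.gz and unpacks it at /, which is why the checkpoint paths in the files below move from ./checkpoints to /checkpoints. Below is a minimal Python sketch of the same fetch/unpack steps for running the app outside Docker; the helper name is hypothetical, and the URL placeholder stands in for the OneDrive link shown in the Dockerfile above.

```python
# Hypothetical helper mirroring the Dockerfile's wget + tar steps (a sketch, not part of the repo).
import tarfile
import urllib.request

# Paste the full OneDrive link from the Dockerfile above.
CHECKPOINT_URL = "https://public.ph.files.1drv.com/...?AVOverride=1"


def fetch_checkpoints(dest_root: str = "/") -> None:
    """Download checkpoints.tar.gz and unpack it so that /checkpoints/... exists."""
    archive = "checkpoints.tar.gz"
    # Equivalent of: RUN wget <url> -O checkpoints.tar.gz
    urllib.request.urlretrieve(CHECKPOINT_URL, archive)
    # Equivalent of: RUN tar xfz checkpoints.tar.gz (extracting to / requires write access there)
    with tarfile.open(archive, "r:gz") as tar:
        tar.extractall(path=dest_root)


if __name__ == "__main__":
    fetch_checkpoints()
```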
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
  title: HiFiFace Inference
  emoji: 📉
- colorFrom: blue
- colorTo: yellow
+ colorFrom: indigo
+ colorTo: indigo
  sdk: docker
  pinned: false
  license: mit
app.py CHANGED
@@ -9,7 +9,7 @@ from models.model import HifiFace
 
 
  class ConfigPath:
- face_detector_weights = "./checkpoints/face_detector/face_detector_scrfd_10g_bnkps.onnx"
+ face_detector_weights = "/checkpoints/face_detector/face_detector_scrfd_10g_bnkps.onnx"
  model_path = ""
  model_idx = 80000
  ffmpeg_device = "cuda"
@@ -21,7 +21,7 @@ def main():
  parser = argparse.ArgumentParser(
  prog="benchmark", description="What the program does", epilog="Text at the bottom of help"
  )
- parser.add_argument("-m", "--model_path", default="./checkpoints/standard_model")
+ parser.add_argument("-m", "--model_path", default="/checkpoints/hififace_pretrained/standard_model")
  parser.add_argument("-i", "--model_idx", default="320000")
  parser.add_argument("-f", "--ffmpeg_device", default="cpu")
  parser.add_argument("-d", "--device", default="cpu")
@@ -34,25 +34,29 @@ def main():
  cfg.device = args.device
  opt = TrainConfig()
  checkpoint = (cfg.model_path, cfg.model_idx)
+ model_path_1 = "/checkpoints/hififace_pretrained/with_gaze_and_mouth"
+ checkpoint1 = ("/checkpoints/hififace_pretrained/with_gaze_and_mouth", "190000")
  model = HifiFace(opt.identity_extractor_config, is_training=False, device=cfg.device, load_checkpoint=checkpoint)
 
+ model1 = HifiFace(opt.identity_extractor_config, is_training=False, device=cfg.device, load_checkpoint=checkpoint1)
  image_infer = ImageSwap(cfg, model)
- video_infer = VideoSwap(cfg, model)
-
+ image_infer1 = ImageSwap(cfg, model1)
  def inference_image(source_face, target_face, shape_rate, id_rate, iterations):
  return image_infer.inference(source_face, target_face, shape_rate, id_rate, int(iterations))
 
- def inference_video(source_face, target_video, shape_rate, id_rate, iterations):
- return video_infer.inference(source_face, target_video, shape_rate, id_rate, int(iterations))
+ def inference_image1(source_face, target_face, shape_rate, id_rate, iterations):
+ return image_infer1.inference(source_face, target_face, shape_rate, id_rate, int(iterations))
 
  model_name = cfg.model_path.split("/")[-1] + ":" + f"{cfg.model_idx}"
+ model_name1 = model_path_1.split("/")[-1] + ":" + "190000"
  with gr.Blocks(title="FaceSwap") as demo:
  gr.Markdown(
  f"""
- ### model: {model_name}
+ ### standard model: {model_name} \n
+ ### model with eye and mouth hm loss: {model_name1}
  """
  )
- with gr.Tab("Image swap"):
+ with gr.Tab("Image swap with standard model"):
  with gr.Row():
  source_image = gr.Image(shape=None, label="source image")
  target_image = gr.Image(shape=None, label="target image")
@@ -88,10 +92,10 @@ def main():
  outputs=output_image,
  )
 
- with gr.Tab("Video swap"):
+ with gr.Tab("Image swap with eye&mouth hm loss model"):
  with gr.Row():
  source_image = gr.Image(shape=None, label="source image")
- target_video = gr.Video(value=None, label="target video")
+ target_image = gr.Image(shape=None, label="target image")
  with gr.Row():
  with gr.Column():
  structure_sim = gr.Slider(
@@ -115,22 +119,15 @@ def main():
  step=1,
  label="iters",
  )
- video_btn = gr.Button("video swap")
- output_video = gr.Video(value=None, label="Result")
-
- video_btn.click(
- fn=inference_video,
- inputs=[
- source_image,
- target_video,
- structure_sim,
- id_sim,
- iters,
- ],
- outputs=output_video,
- )
+ image_btn = gr.Button("image swap")
+ output_image = gr.Image(shape=None, label="Result")
 
- demo.launch(server_name="0.0.0.0", server_port=7860)
+ image_btn.click(
+ fn=inference_image1,
+ inputs=[source_image, target_image, structure_sim, id_sim, iters],
+ outputs=output_image,
+ )
+ demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
  if __name__ == "__main__":
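
Note (not part of the commit): the two image-swap tabs added above are identical except for the inference function they call, so they could be built in a loop. The sketch below assumes the same Gradio API already used in app.py (gr.Tab/gr.Row/gr.Column/gr.Image/gr.Slider/gr.Button); the slider ranges and labels are placeholders, since the unchanged hunk lines do not show them.

```python
# Sketch only: build one tab per (title, inference_fn) pair instead of duplicating the layout.
import gradio as gr


def build_demo(inferencers):
    # inferencers: list of (tab_title, inference_fn) pairs, e.g.
    # [("standard model", inference_image), ("eye&mouth hm loss model", inference_image1)]
    with gr.Blocks(title="FaceSwap") as demo:
        for tab_title, inference_fn in inferencers:
            with gr.Tab(f"Image swap with {tab_title}"):
                with gr.Row():
                    source_image = gr.Image(shape=None, label="source image")
                    target_image = gr.Image(shape=None, label="target image")
                with gr.Row():
                    with gr.Column():
                        # Placeholder ranges/labels; the real values live in the unchanged lines of app.py.
                        structure_sim = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.1, label="structure sim")
                        id_sim = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.1, label="id sim")
                        iters = gr.Slider(minimum=1, maximum=10, value=1, step=1, label="iters")
                image_btn = gr.Button("image swap")
                output_image = gr.Image(shape=None, label="Result")
                image_btn.click(
                    fn=inference_fn,
                    inputs=[source_image, target_image, structure_sim, id_sim, iters],
                    outputs=output_image,
                )
    return demo
```

demo.launch(server_name="0.0.0.0", server_port=7860) would then be called once on the returned Blocks, exactly as in the diff.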
configs/train_config.py CHANGED
@@ -30,10 +30,10 @@ class TrainConfig:
  load_checkpoint = None # ("/data/checkpoints/hififace/rebuilt_discriminator_SFF_c256_1683367464544", 400000)
 
  identity_extractor_config = {
- "f_3d_checkpoint_path": "./checkpoints/Deep3DFaceRecon/epoch_20_new.pth",
- "f_id_checkpoint_path": "./checkpoints/arcface/ms1mv3_arcface_r100_fp16_backbone.pth",
- "bfm_folder": "./checkpoints/useful_ckpt/BFM",
- "hrnet_path": "./checkpoints/useful_ckpt/face_98lmks/HR18-WFLW.pth",
+ "f_3d_checkpoint_path": "/checkpoints/Deep3DFaceRecon/epoch_20_new.pth",
+ "f_id_checkpoint_path": "/checkpoints/arcface/ms1mv3_arcface_r100_fp16_backbone.pth",
+ "bfm_folder": "/checkpoints/useful_ckpt/BFM",
+ "hrnet_path": "/checkpoints/useful_ckpt/face_98lmks/HR18-WFLW.pth",
  }
 
  visualize_interval: int = 100
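
Note (not part of the commit): every path in this config, plus the face detector and model paths in app.py, now points at the /checkpoints tree the Dockerfile unpacks at /. A hedged startup-guard sketch, with a hypothetical name and the paths copied from the diffs above, that fails fast if that tree is missing:

```python
# Hypothetical startup guard (a sketch, not part of the repo): verify the /checkpoints
# tree exists before the app tries to load any model.
import os

EXPECTED_CHECKPOINTS = [
    "/checkpoints/face_detector/face_detector_scrfd_10g_bnkps.onnx",  # used by app.py
    "/checkpoints/hififace_pretrained/standard_model",                # default --model_path
    "/checkpoints/hififace_pretrained/with_gaze_and_mouth",           # second model in app.py
    "/checkpoints/Deep3DFaceRecon/epoch_20_new.pth",                  # identity_extractor_config
    "/checkpoints/arcface/ms1mv3_arcface_r100_fp16_backbone.pth",
    "/checkpoints/useful_ckpt/BFM",
    "/checkpoints/useful_ckpt/face_98lmks/HR18-WFLW.pth",
]


def check_checkpoints() -> None:
    missing = [p for p in EXPECTED_CHECKPOINTS if not os.path.exists(p)]
    if missing:
        raise FileNotFoundError(f"Checkpoint files/folders not found: {missing}")
```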
data_process/model.py CHANGED
@@ -1,4 +1,3 @@
- #!/usr/bin/python
  # -*- encoding: utf-8 -*-
  from typing import Tuple
 
data_process/resnet.py CHANGED
@@ -1,4 +1,3 @@
- #!/usr/bin/python
  # -*- encoding: utf-8 -*-
  import torch
  import torch.nn as nn
server.sh ADDED
@@ -0,0 +1 @@
+ python3 app.py