lev1 committed
Commit 38cb44d
1 parent: 6402659

text improvement

Files changed (1): app.py (+11, -7)
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
 from model import Model, ModelType
 
 # from app_canny import create_demo as create_demo_canny
-from app_pose import create_demo as create_demo_pose
+# from app_pose import create_demo as create_demo_pose
 from app_text_to_video import create_demo as create_demo_text_to_video
 from app_pix2pix_video import create_demo as create_demo_pix2pix_video
 # from app_canny_db import create_demo as create_demo_canny_db
@@ -20,15 +20,19 @@ with gr.Blocks(css='style.css') as demo:
         Text2Video-Zero
         </h1>
         <h2 style="font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
-        We propose <b>Text2Video-Zero, the first zero-shot text-to-video synthesis framework</b>, that also natively supports, Video Instruct Pix2Pix, Pose Conditional, Edge Conditional
-        and, Edge Conditional and DreamBooth Specialized applications.
+        We built <b>Text2Video-Zero, a first zero-shot text-to-video synthesis diffusion framework, that enables low cost yet high-quality and consistent video generation with only pre-trained text-to-image diffusion models without any training on videos or optimization!
+        Text2Video-Zero also naturally supports cool derivative works of pre-trained text-to-image models such as Instruct Pix2Pix, ControlNet and DreamBooth, and based on which we present Video Instruct Pix2Pix, Pose Conditional, Edge Conditional and, Edge Conditional and DreamBooth Specialized applications.
+        We hope our Text2Video-Zero will further democratize AI and empower creativity of everyone by unleashing the zero-shot video generation and editing capacity of the amazing text-to-image models and encourages future research!
         </h2>
         <h3 style="font-weight: 450; font-size: 1rem; margin: 0rem">
-        Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, <a href="https://www.ece.utexas.edu/people/faculty/atlas-wang">Atlas Wang</a>, Shant Navasardyan
+        Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan
         and <a href="https://www.humphreyshi.com/home">Humphrey Shi</a>
         [<a href="https://arxiv.org/abs/2303.13439" style="color:blue;">arXiv</a>]
         [<a href="https://github.com/Picsart-AI-Research/Text2Video-Zero" style="color:blue;">GitHub</a>]
         </h3>
+        # <h3 style="font-weight: 450; font-size: 1rem; margin: 0rem">
+
+        # </h3>
         </div>
         """)
 
@@ -38,9 +42,9 @@ with gr.Blocks(css='style.css') as demo:
     with gr.Tab('Video Instruct Pix2Pix'):
         # pass
         create_demo_pix2pix_video(model)
-    with gr.Tab('Pose Conditional'):
-        # pass
-        create_demo_pose(model)
+    # with gr.Tab('Pose Conditional'):
+    #     # pass
+    #     create_demo_pose(model)
     # with gr.Tab('Edge Conditional'):
     #     # pass
     #     # create_demo_canny(model)
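
For orientation, the sketch below shows roughly how app.py wires these pieces together after this commit. The imports and the tab structure come from the diff above; the Model constructor arguments, the 'Zero-Shot Text2Video' tab label, the placeholder header HTML, and the final launch() call are assumptions added here for illustration only.

# Minimal sketch of how app.py assembles the demo after this commit.
# Assumptions (not taken from the diff): the Model constructor arguments,
# the 'Zero-Shot Text2Video' tab label, and the launch() call at the end.
import torch
import gradio as gr

from model import Model, ModelType  # ModelType mirrors the import in the diff
# from app_pose import create_demo as create_demo_pose   # disabled by this commit
from app_text_to_video import create_demo as create_demo_text_to_video
from app_pix2pix_video import create_demo as create_demo_pix2pix_video

# One shared model instance is passed to every tab's create_demo().
model = Model(device='cuda' if torch.cuda.is_available() else 'cpu',
              dtype=torch.float16)  # assumed signature

with gr.Blocks(css='style.css') as demo:
    gr.HTML("""
        <div>
            <h1>Text2Video-Zero</h1>
            <!-- the full header text is shown in the diff above -->
        </div>
        """)

    with gr.Tab('Zero-Shot Text2Video'):  # assumed label
        create_demo_text_to_video(model)
    with gr.Tab('Video Instruct Pix2Pix'):
        create_demo_pix2pix_video(model)
    # The Pose Conditional tab is commented out by this commit:
    # with gr.Tab('Pose Conditional'):
    #     create_demo_pose(model)

demo.launch()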