Use the latest modelscope

- Dockerfile +7 -0
- README.md +2 -1
- app.py +45 -5
- requirements.txt +1 -1
Dockerfile
CHANGED
@@ -43,6 +43,13 @@ RUN pyenv install ${PYTHON_VERSION} && \
 COPY --chown=1000 ./requirements.txt /tmp/requirements.txt
 RUN pip install --no-cache-dir --upgrade -r /tmp/requirements.txt
 
+RUN git clone https://github.com/modelscope/modelscope && \
+    cd modelscope && \
+    pip install -r requirements.txt && \
+    pip install . && \
+    cd .. && \
+    rm -rf modelscope
+
 COPY --chown=1000 . ${HOME}/app
 ENV PYTHONPATH=${HOME}/app \
     PYTHONUNBUFFERED=1 \
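Note that requirements.txt (below) still pins modelscope==1.8.4; this new RUN layer then overwrites that pin with a build from the GitHub default branch, which is what "use the latest modelscope" means in practice. A quick way to confirm which copy won inside the built image — a minimal sketch, assuming modelscope exposes `__version__` (it does in recent releases):

```python
# Minimal sanity check: run inside the built image to confirm the
# source-built modelscope replaced the modelscope==1.8.4 pin.
import modelscope

# Prints the version of whichever install is active; after this Dockerfile
# change it should come from the cloned GitHub checkout, not the 1.8.4 wheel.
print(modelscope.__version__)
```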
README.md
CHANGED
@@ -1,10 +1,11 @@
 ---
 title: ModelScope-Vid2Vid-XL
-emoji:
+emoji: 🔥
 colorFrom: indigo
 colorTo: purple
 sdk: docker
 pinned: false
+suggested_hardware: a10g-large
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
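The `suggested_hardware` field only pre-selects a hardware tier (here an A10G large) when someone duplicates the Space; it does not change the hardware the Space itself runs on. The currently assigned hardware can be read back via the Hub API — a minimal sketch, with a hypothetical Space id:

```python
# Sketch: inspect a Space's current hardware assignment via the Hub API.
# The Space id below is a hypothetical placeholder.
from huggingface_hub import HfApi

runtime = HfApi().get_space_runtime("your-username/ModelScope-Vid2Vid-XL")
print(runtime.stage, runtime.hardware)  # e.g. RUNNING a10g-large
```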
app.py
CHANGED
@@ -4,6 +4,7 @@ import os
 import pathlib
 import tempfile
 
+import cv2
 import gradio as gr
 import torch
 from huggingface_hub import snapshot_download
@@ -11,6 +12,8 @@ from modelscope.outputs import OutputKeys
 from modelscope.pipelines import pipeline
 
 DESCRIPTION = "# ModelScope-Vid2Vid-XL"
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 if torch.cuda.is_available():
     model_cache_dir = os.getenv("MODEL_CACHE_DIR", "./models")
@@ -21,7 +24,20 @@ else:
     pipe = None
 
 
+def check_input_video(video_path: str) -> None:
+    cap = cv2.VideoCapture(video_path)
+    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    cap.release()
+    if n_frames != 32 or width != 448 or height != 256:
+        raise gr.Error(
+            f"Input video must be 32 frames of size 448x256. Your video is {n_frames} frames of size {width}x{height}."
+        )
+
+
 def video_to_video(video_path: str, text: str) -> str:
+    check_input_video(video_path)
     p_input = {"video_path": video_path, "text": text}
     output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
     pipe(p_input, output_video=output_file.name)[OutputKeys.OUTPUT_VIDEO]
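The new check_input_video gates the pipeline on exactly 32 frames at 448x256, the clip shape the error message names. The same OpenCV property reads can be tried standalone — a minimal sketch, with a hypothetical local file:

```python
# Standalone sketch of the probe that check_input_video performs.
# "clip.mp4" is a hypothetical local file. CAP_PROP_FRAME_COUNT reads
# container metadata, so it is cheap but can be approximate for some codecs.
import cv2

cap = cv2.VideoCapture("clip.mp4")
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cap.release()
print(f"{n_frames} frames at {width}x{height}")  # demo requires 32 at 448x256
```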
@@ -30,14 +46,38 @@ def video_to_video(video_path: str, text: str) -> str:
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    input_video = gr.Video(label="Input video", type="filepath")
-    text_description = gr.Textbox(label="Text description")
-    run_button = gr.Button()
-    output_video = gr.Video(label="Output video")
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
+    with gr.Group():
+        input_video = gr.Video(label="Input video", type="filepath")
+        text_description = gr.Textbox(label="Text description")
+        run_button = gr.Button()
+        output_video = gr.Video(label="Output video")
+    text_description.submit(
+        fn=check_input_video,
+        inputs=input_video,
+        queue=False,
+        api_name=None,
+    ).success(
+        fn=video_to_video,
+        inputs=[input_video, text_description],
+        outputs=output_video,
+        api_name=None,
+    )
     run_button.click(
+        fn=check_input_video,
+        inputs=input_video,
+        queue=False,
+        api_name=None,
+    ).success(
         fn=video_to_video,
         inputs=[input_video, text_description],
         outputs=output_video,
         api_name="run",
     )
-demo.queue(max_size=10).launch()
+
+if __name__ == "__main__":
+    demo.queue(max_size=10).launch()
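Both triggers now chain the cheap validation ahead of the expensive generation: `.success()` only fires the second callback when the first completes without raising `gr.Error`, and `queue=False` lets the validation skip the queue. A minimal self-contained sketch of the same pattern, with hypothetical `validate` and `run` callbacks standing in for check_input_video and video_to_video (gradio 3.x):

```python
# Minimal sketch of the validate-then-run chaining used above (gradio 3.x).
# .success() skips `run` entirely if `validate` raises gr.Error.
import gradio as gr

def validate(text: str) -> None:
    if not text:
        raise gr.Error("Text description must not be empty.")

def run(text: str) -> str:
    return text.upper()

with gr.Blocks() as demo:
    box = gr.Textbox(label="Text")
    out = gr.Textbox(label="Result")
    btn = gr.Button("Run")
    # validate runs unqueued; run only executes if validate did not raise
    btn.click(fn=validate, inputs=box, queue=False).success(
        fn=run, inputs=box, outputs=out
    )

if __name__ == "__main__":
    demo.queue(max_size=10).launch()
```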
requirements.txt
CHANGED
@@ -1,7 +1,7 @@
 easydict==1.10
 einops==0.6.1
 fairscale==0.4.13
-gradio==3.41.
+gradio==3.41.2
 huggingface_hub==0.16.4
 imageio==2.31.1
 modelscope==1.8.4
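Since the Dockerfile now layers a source build of modelscope on top of these pins, the versions that actually resolve in the image are worth verifying; a minimal sketch using only the standard library:

```python
# Sketch: print the resolved versions of the pinned packages at runtime.
from importlib.metadata import version

for pkg in ("gradio", "huggingface_hub", "imageio", "modelscope"):
    print(pkg, version(pkg))
# gradio should resolve to 3.41.2; modelscope may report a newer dev version
# because the Dockerfile installs it from source after these pins.
```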