from huggingface_hub import snapshot_download
from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys
import pathlib
import gradio as gr
def video_gen(prompt):
    """Generate a video from a text prompt via ModelScope text-to-video synthesis.

    Args:
        prompt: Free-form text description of the desired video clip.

    Returns:
        Filesystem path to the generated video, as produced by the pipeline's
        ``OutputKeys.OUTPUT_VIDEO`` entry.
    """
    model_dir = pathlib.Path('weights')
    # Fetch the model weights into ./weights; snapshot_download skips files
    # that are already present locally, so repeat calls are cheap.
    snapshot_download('damo-vilab/modelscope-damo-text-to-video-synthesis',
                      repo_type='model', local_dir=model_dir)
    # NOTE(review): rebuilding the pipeline on every call is slow; consider
    # hoisting it to module level if startup cost matters.
    pipe = pipeline('text-to-video-synthesis', model_dir.as_posix())
    # Bug fix: the original overwrote `prompt` with a hard-coded panda prompt,
    # so the user's gradio input was ignored. Use the caller's text instead.
    output_video_path = pipe({'text': prompt})[OutputKeys.OUTPUT_VIDEO]
    return output_video_path
# Wire the generator into a minimal gradio UI: one textbox in, one video out.
demo = gr.Interface(fn=video_gen, inputs="text", outputs="video")
# share=True exposes a temporary public gradio.live URL in addition to localhost.
demo.launch(share=True)