Removed video model
.idea/.name  +1 -0
app.py       +14 -17
.idea/.name ADDED
@@ -0,0 +1 @@
+
app.py CHANGED
@@ -70,14 +70,14 @@ def load_image_model():
     pipeline.load_lora_weights("LoRA dataset/Weights/pytorch_lora_weights.safetensors", weight_name="pytorch_lora_weights.safetensors")
     return pipeline
 
-@st.cache_resource
-def load_video_model():
-    pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
-    return pipeline
+# @st.cache_resource
+# def load_video_model():
+#     pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
+#     return pipeline
 
 A2C_model = load_text_model()
 image_service = load_image_model()
-video_model = load_video_model()
+#video_model = load_video_model()
 
 if "audio_input" not in st.session_state:
     st.session_state.audio_input = None
@@ -104,18 +104,15 @@ if st.session_state.audio_input:
     if st.button("Generate Image and video from text prompt"):
         st.session_state.image = image_service(st.session_state.captions).images[0]
         image = st.session_state.image
-        video = video_model(
-            prompt = st.session_state.captions,
-            image=st.session_state.image,
-            num_inference_steps=50
-        ).frames[0]
-        st.session_state.video = video
-        export_to_video(video, "generated.mp4", fps=7)
-
-
-        st.image(image)
-        with c2:
-            st.video("generated.mp4")
+        # video = video_model(
+        #     prompt = st.session_state.captions,
+        #     image=st.session_state.image,
+        #     num_inference_steps=50
+        # ).frames[0]
+        # st.session_state.video = video
+        # export_to_video(video, "generated.mp4", fps=7)
+        st.image(image)
+
 
 
 
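For reference, the disabled image-to-video path can still be exercised outside Streamlit. The sketch below is reconstructed from the commented-out lines above: the model id, call arguments, and export call are taken verbatim from the diff, while the standalone scaffolding (imports, the input image, GPU placement) is assumed.

# Standalone sketch of the video path this commit disables.
# Pipeline id, arguments, and export call are copied from the
# commented-out lines in app.py; everything else is assumed.
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_video, load_image

pipeline = I2VGenXLPipeline.from_pretrained(
    "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
)
pipeline.to("cuda")  # assumed: a CUDA device is available for fp16 inference

caption = "a placeholder caption"          # app.py uses st.session_state.captions
image = load_image("generated_image.png")  # app.py passes the generated PIL image

video = pipeline(
    prompt=caption,
    image=image,
    num_inference_steps=50,
).frames[0]  # first batch entry: a list of PIL frames

export_to_video(video, "generated.mp4", fps=7)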