RafaelJaime committed
Commit 3ed0410 • 1 parent: d01b870
Upload 2 files

Files changed:
- app.py +32 -10
- requirements.txt +3 -1
app.py
CHANGED
@@ -1,12 +1,17 @@
 import gradio as gr
 import numpy as np
 from bark import SAMPLE_RATE, generate_audio
-from transformers import pipeline
+from transformers import pipeline
+from diffusers import DiffusionPipeline
 import time
 import torch
+import random
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 generate_story_pipe = pipeline("text-generation", model="openai-community/gpt2")
-
+image_pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
+image_pipe = image_pipe.to(device)
 
 # To measure the performance of the methods, I create this decorator, which simply prints to the terminal the execution time of the methods that use the models; this way we can study how long each model stays active
 def measure_performance(func):
@@ -20,13 +25,6 @@ def measure_performance(func):
         return result
     return wrapper
 
-# Examples shown below the window
-examples = [
-    ["A love story"],
-    ["An humor story"],
-    ["A history about a vampire who dance Reggaeton"],
-]
-
 @measure_performance
 def generate_story(theme):
     prompt = f"Tell me a storie about {theme}"
@@ -40,6 +38,21 @@ def gen_tts(text):
     audio_arr = (audio_arr * 32767).astype(np.int16)
     return (SAMPLE_RATE, audio_arr)
 
+@measure_performance
+def generate_image(prompt):
+    seed = random.randint(0, 1000)
+    generator = torch.Generator().manual_seed(seed)
+
+    image = image_pipe(
+        prompt=prompt,
+        num_inference_steps=2,
+        width=256,
+        height=256,
+        generator=generator,
+    ).images[0]
+
+    return image
+
 def generate_story_and_convert_to_audio(theme):
     story_text = generate_story(theme)
     return gen_tts(story_text)
@@ -50,6 +63,13 @@ def subtitle():
     <p>Cuando se necesite dormir a los mas pequeños de la casa, podrá usar este espacio, en el cual se integrarán varias inteligencias artificiales diseñadas para contar historias de todo tipo, al estilo del libro Las mil y una noches.</p>
     """)
 
+# Examples shown below the window
+examples = [
+    ["A love story"],
+    ["An humor story"],
+    ["A history about a vampire who dance Reggaeton"],
+]
+
 # Documentation
 def end_text():
     gr.Markdown("<h2>Documentación</h2>")
@@ -123,10 +143,12 @@ with gr.Blocks(css=css) as block:
             story_theme = gr.Textbox(label="Escribe de que quieres que vaya la historai", placeholder="Introduce la descripción de la historia aquí...")
         with gr.Column():
            audio_out = gr.Audio(label="Generated Audio", type="numpy", elem_id="audio_out")
+           image_out = gr.Image(label="Generated image", show_label=False)
    run_button = gr.Button("Cuéntame la historia")
    run_button.click(fn=lambda: gr.update(visible=False), inputs=None, outputs=None, queue=False).then(
        fn=generate_story_and_convert_to_audio, inputs=[story_theme], outputs=[audio_out], queue=True).then(
-       fn=lambda: gr.update(visible=True), inputs=None, outputs=None, queue=False)
+       fn=lambda theme: generate_image(theme), inputs=[story_theme], outputs=[image_out], queue=True).then(
+       fn=lambda: gr.update(visible=True), inputs=None, outputs=None, queue=False)
 
 
    gr.Examples(examples=examples, fn=generate_story_and_convert_to_audio, inputs=[story_theme],
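Note: the body of the measure_performance decorator sits between the diff hunks and is not shown, but the comment above its definition states what it does: print each wrapped function's execution time to the terminal so the time each model stays active can be studied. The sketch below is a minimal reconstruction consistent with the visible "return result" / "return wrapper" lines; the exact names and formatting are assumptions, not the repository's hidden code.

import time
from functools import wraps

def measure_performance(func):
    # Hypothetical reconstruction: the real body is hidden between the diff hunks.
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        # Print how long the wrapped, model-using function was active.
        print(f"{func.__name__} took {time.time() - start:.2f} s")
        return result
    return wrapper

Applied as @measure_performance on generate_story and generate_image, a decorator like this logs one timing line per call without changing the functions' return values.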
requirements.txt
CHANGED
@@ -2,4 +2,6 @@ torch
 transformers
 sentencepiece
 sacremoses
-bark
+bark
+diffusers
+random