"""
The Streamlit app for the project demo.
In the demo, the user can write a prompt
and the model will generate a response using the grouped sampling algorithm.
"""
import streamlit as st
from torch.cuda import CudaError
from available_models import AVAILABLE_MODELS
from hanlde_form_submit import on_form_submit
st.title("A Single Usage is All You Need - Demo")
with st.form("request_form"):
selected_model_name: str = st.selectbox(
label="choose a model",
options=AVAILABLE_MODELS,
help="opt-iml-max-30b generates better texts but is slower",
)
output_length: int = st.number_input(
label="the length of the output (in tokens)",
min_value=1,
max_value=512,
value=5,
)
submitted_prompt: str = st.text_area(
label="prompt",
value="""
Keywords: cat, look, mouse
What is a sentence that includes all these keywords?
Answer:""",
max_chars=1024,
)
submitted: bool = st.form_submit_button(
label="generate text",
disabled=False,
)
if submitted:
try:
output = on_form_submit(
selected_model_name,
output_length,
submitted_prompt,
)
except CudaError as e:
st.error("Out of memory. Please try a smaller model, shorter prompt, or a smaller output length.")
except (ValueError, TypeError, RuntimeError) as e:
st.error(e)
else:
st.write(f"Generated text: {output}")