yonikremer's picture
changed the default prompt
21c7771
raw
history blame
1.82 kB
"""
The Streamlit app for the project demo.
In the demo, the user can write a prompt
and the model will generate a response using the grouped sampling algorithm.
"""
import streamlit as st
from torch.cuda import CudaError
from available_models import AVAILABLE_MODELS
from hanlde_form_submit import on_form_submit
# Page title (Hebrew UI string, kept byte-for-byte as stored).
st.title("讚讙讬诪讛 讘拽讘讜爪讜转 - 砖讬诪讜砖 讬注讬诇 讘诪讜讚诇讬 砖驻讛 住讬讘转讬讬诐")

# Input form: the widgets below only deliver their values when the
# submit button is pressed, so the model is not re-run on every rerun.
with st.form("request_form"):
    # Model picker — options come from the project's AVAILABLE_MODELS list.
    selected_model_name: str = st.selectbox(
        label="讘讞专讜 诪讜讚诇",
        options=AVAILABLE_MODELS,
        help="opt-iml-max-30b generates better texts but is slower",
    )
    # Maximum output length in words, clamped by the widget to 1..2048.
    output_length: int = st.number_input(
        label="讻诪讜转 讛诪讬诇讬诐 讛诪拽住讬诪诇讬转 讘驻诇讟 - 讘讬谉 1 诇-2048",
        min_value=1,
        max_value=2048,
        value=5,
    )
    # Prompt for the algorithm (English only, per the label); capped at 1024 chars.
    # The triple-quoted default must keep its lines at column 0 — they are
    # literal string content, not code indentation.
    submitted_prompt: str = st.text_area(
        label="讛拽诇讟 诇讗诇讜讙专讬转诐 (讘讗谞讙诇讬转 讘诇讘讚)",
        value="""Keywords: cat, look, mouse
What is a sentence that includes all these keywords?
Answer:""",
        max_chars=1024,
    )
    # True only on the rerun triggered by the user pressing the button.
    submitted: bool = st.form_submit_button(
        label="爪讜专 讟拽住讟",
        disabled=False,
    )
if submitted:
    try:
        # Run grouped-sampling generation; this is the slow step.
        output = on_form_submit(
            selected_model_name,
            output_length,
            submitted_prompt,
        )
    except CudaError:
        # GPU failure (typically out of memory) — show actionable advice
        # instead of a traceback. The exception object itself is not shown.
        st.error("Out of memory. Please try a smaller model, shorter prompt, or a smaller output length.")
    except (ValueError, TypeError, RuntimeError) as error:
        # Validation / runtime problems are surfaced directly in the UI.
        st.error(error)
    else:
        # Only rendered when generation succeeded.
        st.write(f"Generated text: {output}")
# Render the long user instructions (Hebrew markdown) below the form.
# Explicit UTF-8: without it, open() uses the locale's default encoding,
# which breaks the Hebrew text on non-UTF-8 systems (e.g. Windows cp1252).
with open("user_instructions_hebrew.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
st.markdown(long_description)