# GenBlogDemo / app.py
# Author: Krzysztof Krystian Jankowski
# Initial commit: 076a09d
from functools import lru_cache

import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers
# load the model (once — cached across Streamlit reruns)
@lru_cache(maxsize=1)
def _load_llm():
    """Load the quantized TinyLlama GGUF model.

    Cached with lru_cache because Streamlit re-executes the script on every
    widget interaction; without caching the weights would be reloaded from
    disk on each "Generate Blog" click.
    """
    return CTransformers(model='models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf',
                         model_type='llama',
                         config={'max_new_tokens': 256, 'temperature': 0.2})


def getLlamaResponse(input_text, no_words, blog_style):
    """Generate a blog post with the local TinyLlama model.

    Args:
        input_text: Blog topic entered by the user.
        no_words: Requested word count (str or int; interpolated verbatim
            into the prompt).
        blog_style: Style label from the UI, e.g. "Personal".

    Returns:
        The raw text completion produced by the model.
    """
    llm = _load_llm()
    # create a prompt
    template = """
    Write a blog post about the topic: {input_text} in {blog_style} style. The blog should be {no_words} words long.
    """
    prompt = PromptTemplate(input_variables=["blog_style", "input_text", "no_words"],
                            template=template)
    # generate the response
    response = llm.invoke(prompt.format(blog_style=blog_style,
                                        input_text=input_text,
                                        no_words=no_words))
    return response
# Streamlit UI
st.set_page_config(page_title="GenBlog Demo",
                   page_icon="📚",
                   layout="centered",
                   initial_sidebar_state='collapsed')
st.header("GenBlog Demo 📚")

input_text = st.text_input("Enter the Blog Topic")

col1, col2 = st.columns([5, 5])
with col1:
    # st.text_input requires a string default; the original passed int 100,
    # which Streamlit rejects with a StreamlitAPIException.
    no_words = st.text_input("Enter the number of words", value="100")
with col2:
    blog_style = st.selectbox("Select the Blog Style",
                              ["Personal", "Research", "Story Driven"])

submit = st.button("Generate Blog")
if submit:
    if not input_text.strip():
        # Guard: don't invoke the model with an empty topic.
        st.warning("Please enter a blog topic first.")
    else:
        # Show progress while the local model generates (can take a while on CPU).
        with st.spinner("Generating blog post..."):
            st.write(getLlamaResponse(input_text, no_words, blog_style))