import streamlit as st
import openai
import os
import base64
import glob
import json
import mistune
import pytz

from datetime import datetime
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
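# The OpenAI API key is read from the OPENAI_KEY environment variable.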
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(
    page_title="GPT Streamlit Document Reasoner",
    layout="wide")
st.title("GPT Chat with Optional File Context - Talk to your data!")

# Output options sidebar menu
st.sidebar.title("Output Options")
menu = ["txt", "htm", "md"]
choice = st.sidebar.selectbox("Choose an output file type to save your prompt and results", menu)
choicePrefix = "Output and download file set to "
if choice == "txt":
    st.sidebar.write(choicePrefix + "Text file.")
elif choice == "htm":
    st.sidebar.write(choicePrefix + "HTML5.")
elif choice == "md":
    st.sidebar.write(choicePrefix + "Markdown.")

# Sidebar slider for how much of an uploaded document to include in the prompt
max_length = st.sidebar.slider("Max document length", min_value=1000, max_value=32000, value=3000, step=1000)
# Truncate a document to the selected maximum length
def truncate_document(document, length):
    return document[:length]
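# Send the accumulated prompts to the OpenAI chat completion endpoint and return the reply text.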
def chat_with_model(prompts):
    model = "gpt-3.5-turbo"
    #model = "gpt-4-32k"  # 32k token limit shared between prompt and completion
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.extend([{'role': 'user', 'content': prompt} for prompt in prompts])
    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']
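# Illustrative call (the second element is the optional uploaded-file text):
#   chat_with_model(["Summarize the key points", file_content])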
def generate_filename(prompt):
    # Build a filename from a US Central timestamp plus the first 30 alphanumeric characters of the prompt
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%I_%M_%p")
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:30]
    return f"{safe_date_time}_{safe_prompt}.txt"
def create_file(filename, prompt, response):
    with open(filename, 'w') as file:
        file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
def get_table_download_link_old(file_path):
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    href = f'<a href="data:file/htm;base64,{b64}" target="_blank" download="{os.path.basename(file_path)}">{os.path.basename(file_path)}</a>'
    return href
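# Build a base64 data-URI download link for a saved file, picking the MIME type from its extension.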
def get_table_download_link(file_path):
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # file extension, e.g. ".txt"
    if ext == '.txt':
        mime_type = 'text/plain'
    elif ext == '.htm':
        mime_type = 'text/html'
    elif ext == '.md':
        mime_type = 'text/markdown'
    else:
        mime_type = 'application/octet-stream'  # generic binary fallback
    href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    return href
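# Strip comment-like elements from an XML string and truncate it to the sidebar's max document length.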
def CompressXML(xml_text):
    root = ET.fromstring(xml_text)
    # ElementTree elements do not track their parent, so build a child-to-parent map before removing nodes
    parent_map = {child: parent for parent in root.iter() for child in parent}
    for elem in list(root.iter()):
        if isinstance(elem.tag, str) and 'Comment' in elem.tag:
            parent = parent_map.get(elem)
            if parent is not None:
                parent.remove(elem)
    return ET.tostring(root, encoding='unicode', method="xml")[:max_length]
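# Convert an uploaded file to plain text based on its reported MIME type.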
def read_file_content(file):
    if file.type == "application/json":
        content = json.load(file)
        return str(content)
    elif file.type == "text/html" or file.type == "text/htm":
        content = BeautifulSoup(file, "html.parser")
        return content.text
    elif file.type == "application/xml" or file.type == "text/xml":
        tree = ET.parse(file)
        root = tree.getroot()
        return CompressXML(ET.tostring(root, encoding='unicode'))
    elif file.type == "text/markdown" or file.type == "text/md":
        md = mistune.create_markdown()
        content = md(file.read().decode())
        return content
    elif file.type == "text/plain":
        return file.getvalue().decode()
    else:
        return ""
def main():
    prompts = ['']
    file_content = ""
    user_prompt = st.text_area("Your question:", '', height=120)
    uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "html", "htm", "md", "txt"])

    if user_prompt:
        prompts.append(user_prompt)

    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file)
        prompts.append(file_content)

    if st.button('💬 Chat'):
        st.write('Chatting with GPT-3.5-turbo...')
        response = chat_with_model(prompts)
        st.write('Response:')
        st.write(response)

        filename = generate_filename(user_prompt)
        create_file(filename, user_prompt, response)
        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    if len(file_content) > 0:
        st.markdown(f"**Content Added to Prompt:**\n{file_content}")

    # List previously saved outputs in the sidebar with download links and delete buttons
    saved_files = glob.glob("*.txt")
    for file in saved_files:
        st.sidebar.markdown(get_table_download_link(file), unsafe_allow_html=True)
        if st.sidebar.button(f"🗑 Delete {file}"):
            os.remove(file)
            st.experimental_rerun()

if __name__ == "__main__":
    main()