import streamlit as st
import openai
import os
import base64
import glob
import json
import mistune
import pytz
from datetime import datetime
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
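# The OpenAI API key is read from the OPENAI_KEY environment variable at startup.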
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(
    page_title="GPT Streamlit Document Reasoner",
    layout="wide")
# st.title("GPT Chat with Optional File Context - Talk to your data!")
# Output options sidebar menu
# st.sidebar.title("Output Options")
menu = ["txt", "htm", "md"]
choice = st.sidebar.selectbox("Choose output file type to save results", menu)
choicePrefix = "Output and download file set to "
if choice == "txt":
st.sidebar.write(choicePrefix + "Text file.")
elif choice == "htm":
st.sidebar.write(choicePrefix + "HTML5.")
elif choice == "md":
st.sidebar.write(choicePrefix + "Markdown.")
elif choice == "py":
st.sidebar.write(choicePrefix + "Python AI UI/UX")
# sidebar slider for file input length to include in inference blocks
max_length = st.sidebar.slider("Max document length", min_value=1000, max_value=32000, value=3000, step=1000)
# Truncate document
def truncate_document(document, length):
    return document[:length]
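# Send a system message plus every accumulated prompt to the OpenAI ChatCompletion endpoint and return the reply text.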
def chat_with_model(prompts):
    model = "gpt-3.5-turbo"
    #model = "gpt-4-32k"  # 32k tokens between prompt and inference tokens
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.extend([{'role': 'user', 'content': prompt} for prompt in prompts])
    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']
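# Build a filesystem-safe filename from the current US/Central time and the first 30 alphanumeric characters of the prompt.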
def generate_filename(prompt):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%I_%M_%p")
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:30]
    return f"{safe_date_time}_{safe_prompt}.txt"
def create_file(filename, prompt, response):
    with open(filename, 'w') as file:
        file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
def get_table_download_link_old(file_path):
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    href = f'<a href="data:file/htm;base64,{b64}" target="_blank" download="{os.path.basename(file_path)}">{os.path.basename(file_path)}</a>'
    return href
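# Build a base64 data-URI download link, choosing the MIME type from the file extension.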
def get_table_download_link(file_path):
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # get the file extension
    if ext == '.txt':
        mime_type = 'text/plain'
    elif ext == '.htm':
        mime_type = 'text/html'
    elif ext == '.md':
        mime_type = 'text/markdown'
    else:
        mime_type = 'application/octet-stream'  # general binary data type
    href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    return href
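# Remove comment-named elements from an XML string so a smaller document is passed to the model.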
def CompressXML(xml_text):
    root = ET.fromstring(xml_text)
    # ElementTree elements have no parent pointer, so map children to parents before removing.
    parent_map = {child: parent for parent in root.iter() for child in parent}
    for elem in list(root.iter()):
        if isinstance(elem.tag, str) and 'Comment' in elem.tag and elem in parent_map:
            parent_map[elem].remove(elem)
    return ET.tostring(root, encoding='unicode', method="xml")
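# Extract plain text from an uploaded file based on its MIME type, truncated to the sidebar's max document length.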
def read_file_content(file, max_length):
    if file.type == "application/json":
        content = str(json.load(file))
    elif file.type == "text/html" or file.type == "text/htm":
        content = BeautifulSoup(file, "html.parser").text
    elif file.type == "application/xml" or file.type == "text/xml":
        tree = ET.parse(file)
        root = tree.getroot()
        content = CompressXML(ET.tostring(root, encoding='unicode'))
    elif file.type == "text/markdown" or file.type == "text/md":
        md = mistune.create_markdown()
        content = md(file.read().decode())
    elif file.type == "text/plain":
        content = file.getvalue().decode()
    else:
        content = ""
    return truncate_document(content, max_length)
def main():
    prompts = []
    file_content = ""
    user_prompt = st.text_area("Your question:", '', height=120)
    uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "html", "htm", "md", "txt"])
    if user_prompt:
        prompts.append(user_prompt)
    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file, max_length)
        prompts.append(file_content)
    if st.button('💬 Chat'):
        st.write('Thinking and Reasoning with your inputs...')
        response = chat_with_model(prompts)
        st.write('Response:')
        st.write(response)
        filename = generate_filename(user_prompt)
        create_file(filename, user_prompt, response)
        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
    if len(file_content) > 0:
        st.markdown(f"**File Content Added:**\n{file_content}")
    # List previously saved .txt responses in the sidebar with download and delete controls.
    saved_files = glob.glob("*.txt")
    for file in saved_files:
        st.sidebar.markdown(get_table_download_link(file), unsafe_allow_html=True)
        if st.sidebar.button(f"🗑️ Delete {file}"):
            os.remove(file)
            st.experimental_rerun()

if __name__ == "__main__":
    main()