Releasing Demo Application
Browse files- .chainlit/.langchain.db +0 -0
- .chainlit/config.toml +63 -0
- Dockerfile +11 -0
- __pycache__/app.cpython-39.pyc +0 -0
- __pycache__/tools.cpython-39.pyc +0 -0
- app.py +98 -0
- chainlit.md +14 -0
- tools.py +60 -0
.chainlit/.langchain.db
ADDED
Binary file (12.3 kB). View file
|
|
.chainlit/config.toml
ADDED
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[project]
|
2 |
+
# If true (default), the app will be available to anonymous users.
|
3 |
+
# If false, users will need to authenticate and be part of the project to use the app.
|
4 |
+
public = true
|
5 |
+
|
6 |
+
# The project ID (found on https://cloud.chainlit.io).
|
7 |
+
# The project ID is required when public is set to false or when using the cloud database.
|
8 |
+
#id = ""
|
9 |
+
|
10 |
+
# Uncomment if you want to persist the chats.
|
11 |
+
# local will create a database in your .chainlit directory (requires node.js installed).
|
12 |
+
# cloud will use the Chainlit cloud database.
|
13 |
+
# custom will load use your custom client.
|
14 |
+
# database = "local"
|
15 |
+
|
16 |
+
# Whether to enable telemetry (default: true). No personal data is collected.
|
17 |
+
enable_telemetry = true
|
18 |
+
|
19 |
+
# List of environment variables to be provided by each user to use the app.
|
20 |
+
user_env = []
|
21 |
+
|
22 |
+
# Duration (in seconds) during which the session is saved when the connection is lost
|
23 |
+
session_timeout = 3600
|
24 |
+
|
25 |
+
[UI]
|
26 |
+
# Name of the app and chatbot.
|
27 |
+
name = "Chatbot"
|
28 |
+
|
29 |
+
# Description of the app and chatbot. This is used for HTML tags.
|
30 |
+
# description = ""
|
31 |
+
|
32 |
+
# The default value for the expand messages settings.
|
33 |
+
default_expand_messages = false
|
34 |
+
|
35 |
+
# Hide the chain of thought details from the user in the UI.
|
36 |
+
hide_cot = false
|
37 |
+
|
38 |
+
# Link to your github repo. This will add a github button in the UI's header.
|
39 |
+
# github = ""
|
40 |
+
|
41 |
+
# Override default MUI light theme. (Check theme.ts)
|
42 |
+
[UI.theme.light]
|
43 |
+
#background = "#FAFAFA"
|
44 |
+
#paper = "#FFFFFF"
|
45 |
+
|
46 |
+
[UI.theme.light.primary]
|
47 |
+
#main = "#F80061"
|
48 |
+
#dark = "#980039"
|
49 |
+
#light = "#FFE7EB"
|
50 |
+
|
51 |
+
# Override default MUI dark theme. (Check theme.ts)
|
52 |
+
[UI.theme.dark]
|
53 |
+
#background = "#FAFAFA"
|
54 |
+
#paper = "#FFFFFF"
|
55 |
+
|
56 |
+
[UI.theme.dark.primary]
|
57 |
+
#main = "#F80061"
|
58 |
+
#dark = "#980039"
|
59 |
+
#light = "#FFE7EB"
|
60 |
+
|
61 |
+
|
62 |
+
[meta]
|
63 |
+
generated_by = "0.6.2"
|
Dockerfile
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Demo app image: run the Chainlit app as a non-root user on port 7860.
FROM python:3.11

# Create an unprivileged user and run everything below as that user.
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app

# Install dependencies first so this layer stays cached until requirements
# change. NOTE: the original copied to "~/app" — Dockerfile COPY does not
# expand "~", so that created a literal "~/app" directory under WORKDIR.
COPY --chown=user ./requirements.txt $HOME/app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code, owned by the unprivileged user. The original
# had an extra bare "COPY . ." that re-copied everything as root, clobbering
# the ownership set by --chown.
COPY --chown=user . $HOME/app

CMD ["chainlit", "run", "app.py", "--port", "7860"]
|
__pycache__/app.cpython-39.pyc
ADDED
Binary file (2.63 kB). View file
|
|
__pycache__/tools.cpython-39.pyc
ADDED
Binary file (1.59 kB). View file
|
|
app.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.agents import AgentExecutor, AgentType, initialize_agent
|
2 |
+
from langchain.agents.structured_chat.prompt import SUFFIX
|
3 |
+
from langchain.chat_models import ChatOpenAI
|
4 |
+
from langchain.memory import ConversationBufferMemory
|
5 |
+
from tools import generate_image_tool
|
6 |
+
|
7 |
+
import chainlit as cl
|
8 |
+
from chainlit.action import Action
|
9 |
+
from chainlit.input_widget import Select, Switch, Slider
|
10 |
+
|
11 |
+
|
12 |
+
@cl.author_rename
def rename(orig_author):
    """Map internal Langchain author names to user-facing labels."""
    # Show messages produced by the "LLMChain" step as coming from
    # "Assistant"; any other author is passed through unchanged.
    if orig_author == "LLMChain":
        return "Assistant"
    return orig_author
|
18 |
+
|
19 |
+
|
20 |
+
def get_memory():
    """Return the conversation memory for the current user session.

    The original decorated this with ``@cl.cache``, which memoizes the
    result globally: every connected user shared one single
    ``ConversationBufferMemory`` and could see each other's chat history.
    Keep the memory in the per-user session instead (same signature, same
    return type, but scoped correctly).
    """
    memory = cl.user_session.get("memory")
    if memory is None:
        memory = ConversationBufferMemory(memory_key="chat_history")
        cl.user_session.set("memory", memory)
    return memory
|
23 |
+
|
24 |
+
|
25 |
+
@cl.on_chat_start
async def start():
    """Show the settings panel when a chat begins, then build the agent."""
    # Widgets surfaced in the Chainlit settings panel; their ids become the
    # keys of the settings dict handed to setup_agent().
    widgets = [
        Select(
            id="Model",
            label="OpenAI - Model",
            values=["gpt-3.5-turbo", "gpt-4-1106-preview"],
            initial_index=1,
        ),
        Switch(id="Streaming", label="OpenAI - Stream Tokens", initial=True),
        Slider(
            id="Temperature",
            label="OpenAI - Temperature",
            initial=0,
            min=0,
            max=2,
            step=0.1,
        ),
    ]
    settings = await cl.ChatSettings(widgets).send()
    # Build the agent once up front with the initial settings.
    await setup_agent(settings)
|
47 |
+
|
48 |
+
|
49 |
+
@cl.on_settings_update
async def setup_agent(settings):
    """(Re)build the structured-chat agent whenever the settings change.

    The resulting AgentExecutor is stored in the user session under "agent"
    so the message handler can pick it up.
    """
    print("Setup agent with following settings: ", settings)

    # Prepend the running chat history to the standard structured-chat
    # suffix so the agent sees prior turns.
    suffix_with_history = "Chat history:\n{chat_history}\n\n" + SUFFIX

    chat_model = ChatOpenAI(
        model=settings["Model"],
        temperature=settings["Temperature"],
        streaming=settings["Streaming"],
    )

    agent = initialize_agent(
        llm=chat_model,
        tools=[generate_image_tool],
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        memory=get_memory(),
        agent_kwargs={
            "suffix": suffix_with_history,
            "input_variables": ["input", "agent_scratchpad", "chat_history"],
        },
    )
    cl.user_session.set("agent", agent)
|
72 |
+
|
73 |
+
|
74 |
+
@cl.on_message
async def main(message: cl.Message):
    """Handle a user message: run the agent, then attach any generated image."""
    agent = cl.user_session.get("agent")  # type: AgentExecutor
    # Clear any image left over from the previous turn so we only show
    # images produced by this run.
    cl.user_session.set("generated_image", None)

    # No async implementation in the Stability AI client, fallback to sync
    res = await cl.make_async(agent.run)(
        input=message.content, callbacks=[cl.LangchainCallbackHandler()]
    )

    elements = []
    actions = []

    # The image tool records the generated image's name (and its bytes,
    # keyed by that name) in the user session.
    generated_image_name = cl.user_session.get("generated_image")
    # Guard: only look the bytes up when a name was actually recorded this
    # turn — the original called user_session.get(None) otherwise.
    generated_image = (
        cl.user_session.get(generated_image_name) if generated_image_name else None
    )
    if generated_image:
        elements = [
            cl.Image(
                content=generated_image,
                name=generated_image_name,
                display="inline",
            )
        ]

    await cl.Message(content=res, elements=elements, actions=actions).send()
|
chainlit.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Welcome to Chainlit! ππ€
|
2 |
+
|
3 |
+
Hi there, Developer! π We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
|
4 |
+
|
5 |
+
## Useful Links π
|
6 |
+
|
7 |
+
- **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) π
|
8 |
+
- **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/ZThrUxbAYw) to ask questions, share your projects, and connect with other developers! π¬
|
9 |
+
|
10 |
+
We can't wait to see what you create with Chainlit! Happy coding! π»π
|
11 |
+
|
12 |
+
## Welcome screen
|
13 |
+
|
14 |
+
To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
|
tools.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
|
2 |
+
import os
|
3 |
+
from openai import OpenAI
|
4 |
+
from langchain.tools import StructuredTool, Tool
|
5 |
+
from io import BytesIO
|
6 |
+
import requests
|
7 |
+
import json
|
8 |
+
from io import BytesIO
|
9 |
+
|
10 |
+
import chainlit as cl
|
11 |
+
|
12 |
+
|
13 |
+
def get_image_name():
    """Return a session-unique name ("image-0", "image-1", ...) for a new image."""
    count = cl.user_session.get("image_count")
    # First image of the session is "image-0"; every later call bumps the
    # counter stored in the session.
    count = 0 if count is None else count + 1
    cl.user_session.set("image_count", count)
    return f"image-{count}"
|
23 |
+
|
24 |
+
|
25 |
+
def _generate_image(prompt: str):
    """Generate an image with DALL-E 3 and stash its bytes in the user session.

    Returns the session key ("image-N") under which the raw image bytes were
    stored; "generated_image" is pointed at that key so the UI handler can
    find the latest image.
    """
    client = OpenAI()

    response = client.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size="1024x1024",
        quality="standard",
        n=1,
    )

    # Download the generated image. Fail loudly on an HTTP error instead of
    # silently storing an error page as image bytes (the original also left
    # a debug print of the payload type here).
    image_payload = requests.get(response.data[0].url, stream=True)
    image_payload.raise_for_status()

    name = get_image_name()
    # .content is already the raw bytes; no BytesIO round-trip needed.
    cl.user_session.set(name, image_payload.content)
    cl.user_session.set("generated_image", name)
    return name
|
46 |
+
|
47 |
+
|
48 |
+
def generate_image(prompt: str):
    """Tool entry point: generate an image and report its name to the agent."""
    return f"Here is {_generate_image(prompt)}."
|
51 |
+
|
52 |
+
|
53 |
+
# JSON shape the agent should use when invoking the tool. This is a plain
# string (not an f-string), so no brace doubling is needed — the original's
# '{{"prompt": "prompt"}}' leaked literal double braces into the tool
# description, instructing the model to emit invalid JSON.
generate_image_format = '{"prompt": "prompt"}'

# Tool exposed to the agent; return_direct=True sends the tool's string
# result straight back to the user instead of another LLM pass.
generate_image_tool = Tool.from_function(
    func=generate_image,
    name="GenerateImage",
    description=f"Useful to create an image from a text prompt. Input should be a single string strictly in the following JSON format: {generate_image_format}",
    return_direct=True,
)
|