rag_study_merge2 (#2)
Adding rag study updates (148b40923b7125d9750b4c2e8e058ccb3b4b5efb)
- .gitattributes +2 -0
- .gitignore +9 -2
- Dockerfile +13 -11
- README.md +7 -1
- Start.py +27 -0
- config/config.json +44 -0
- config/index_data.json +13 -0
- data/AMS/AMS_1996.pdf +3 -0
- data/AMS/AMS_1997.pdf +3 -0
- data/AMS/AMS_1998.pdf +3 -0
- data/AMS/AMS_1999.pdf +3 -0
- data/AMS/AMS_2000.pdf +3 -0
- data/AMS/AMS_2001.pdf +3 -0
- data/AMS/AMS_2002.pdf +3 -0
- data/AMS/AMS_2004.pdf +3 -0
- data/AMS/AMS_2006.pdf +3 -0
- data/AMS/AMS_2008.pdf +3 -0
- data/AMS/AMS_2010.pdf +3 -0
- data/AMS/AMS_2012.pdf +3 -0
- data/AMS/AMS_2014.pdf +3 -0
- data/AMS/AMS_2016.pdf +3 -0
- data/AMS/AMS_2018.pdf +3 -0
- data/AMS/AMS_2020.pdf +3 -0
- data/AMS/AMS_2022.pdf +3 -0
- data/AMS/README.txt +18 -0
- data/AMS/ams_data-400-0-50.json +0 -0
- data/AMS/ams_data-400-0.jsonl +3 -0
- data/AMS/ams_data-5000-0.jsonl +3 -0
- data_import.py +262 -0
- pages/1_Chatbot_AMS_Langchain.py +0 -152
- pages/1_Chatbot_AMS_Modular.py +158 -0
- pages/2_Chatbot_AMS_Canopy.py +0 -157
- pages/2_Document_Upload.py +109 -0
- pages/3_Visualize_Data.py +110 -0
- prompts.py +6 -63
- queries.py +236 -113
- setup.py +140 -0
.gitattributes
CHANGED
@@ -32,4 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
+*.jsonl filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1,3 +1,10 @@
+
+.env
+*.log
+# *.pdf
+*.DS_Store
+.ragatouille/
 .venv/
-
-
+db/
+scripts/__pycache__
+scripts/tmp_trainer
Dockerfile
CHANGED
@@ -2,22 +2,24 @@
 FROM python:3.11.1
 
 # Set the working directory in the container
-WORKDIR /app
+WORKDIR /usr/src/app
 
 # Install poetry
-
+RUN pip3 install poetry
 
-# Copy the
-COPY . .
+# Copy only the necessary files for installing dependencies
+COPY pyproject.toml poetry.lock ./
 
-#
-#
-
-# Streamlit must be installed separately. Potentially this will cause an issue with dependencies in the future, but it's the only way it works.
-# RUN pip3 install streamlit
+# Disable virtual environments creation by Poetry
+# as the Docker container itself is an isolated environment
+RUN poetry config virtualenvs.create false
 
 # Install dependencies
-RUN pip3 install -r requirements.txt
+# RUN pip3 install -r requirements.txt
+RUN poetry install
+
+# Copy the current directory contents into the container at /usr/src/app
+COPY . .
 
 # Make a port available to the world outside this container
 # The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime. Your container needs to listen to Streamlit’s (default) port 8501.
@@ -27,7 +29,7 @@ EXPOSE 8501
 HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
 
 # An ENTRYPOINT allows you to configure a container that will run as an executable. Here, it also contains the entire streamlit run command for your app, so you don’t have to call it from the command line
-ENTRYPOINT ["streamlit", "run", "
+ENTRYPOINT ["streamlit", "run", "Start.py", "--server.port=8501", "--server.address=0.0.0.0"]
 
 # Execute with:
 # docker build -t <image_name> .
README.md
CHANGED
@@ -8,4 +8,10 @@ pinned: false
 app_port: 8501
 ---
 
-
+# aerospace_chatbot
+Aerospace discipline-specific chatbots and AI tools.
+
+## Dependencies
+Dependencies are managed with [poetry](https://python-poetry.org/). Detailed install instructions are located [here](https://www.evernote.com/shard/s84/sh/f37de730-ce37-cd28-789c-86c3dc024a7c/90VLNref38KARua10p4am7IZkwsOxo93fXuBNqba-HpeIkMqGpRZrRkmjw)
+* Once poetry is installed, run the following to install all dependencies: <code>poetry install</code>
+* poetry.lock and pyproject.toml are committed to this directory and are the working dependencies.
Start.py
ADDED
@@ -0,0 +1,27 @@
import streamlit as st
import os

# Set up page
st.set_page_config(
    page_title="Aerospace Chatbot: AMS",
)
st.title("Aerospace Chatbot Homepage")
st.markdown("Code base: https://github.com/dsmueller3760/aerospace_chatbot/tree/rag_study")
st.markdown('---')
st.title("Chatbots")
st.markdown("""
Chatbots for aerospace mechanisms symposia, using all available papers published since 2000
* Modular version meant to study retrieval methods
""")
st.subheader("AMS")
'''
This chatbot will look up from all Aerospace Mechanism Symposia in the following location: https://github.com/dsmueller3760/aerospace_chatbot/tree/main/data/AMS
* Available models: https://platform.openai.com/docs/models
* Model parameters: https://platform.openai.com/docs/api-reference/chat/create
* Pinecone: https://docs.pinecone.io/docs/projects#api-keys
* OpenAI API: https://platform.openai.com/api-keys
'''

# # Establish secrets
# PINECONE_ENVIRONMENT=os.getenv('PINECONE_ENVIRONMENT')
# PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')
config/config.json
ADDED
@@ -0,0 +1,44 @@
{
    "databases": [
        {
            "name": "Pinecone",
            "embedding_models": ["Openai", "Voyage"]
        },
        {
            "name": "ChromaDB",
            "embedding_models": ["Openai"]
        },
        {
            "name": "RAGatouille",
            "hf_rag_models": [
                "colbert-ir/colbertv2.0"
            ]
        }
    ],
    "llms": [
        {
            "name": "OpenAI",
            "models": [
                "gpt-3.5-turbo-1106",
                "gpt-3.5-turbo-instruct",
                "gpt-4",
                "gpt-4-32k",
                "gpt-4-1106-preview"
            ]
        },
        {
            "name": "Hugging Face",
            "models": [
                "mistralai/Mixtral-8x7B-Instruct-v0.1",
                "ai-aerospace/autotrain-ams_v0.1_100_Mistral-7B-Instruct-v0.1",
                "meta-llama/Llama-2-7b-chat-hf"
            ]
        }
    ],
    "rag_types": [
        "Standard",
        "Parent-Child",
        "Hypothetical Questions",
        "Summaries"
    ]
}
config/index_data.json
ADDED
@@ -0,0 +1,13 @@
{
    "Pinecone": {
        "Openai": "pinecone-openai-ams",
        "Voyage": "pinecone-voyage-ams"
    },
    "ChromaDB": {
        "Openai": "chromadb-openai-ams",
        "Voyage": "chromadb-voyage-ams"
    },
    "RAGatouille": {
        "colbert-ir/colbertv2.0": "RAGatouille-colbertv2.0-ams"
    }
}
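Note: config/config.json and config/index_data.json together drive the sidebar options in the pages below — the first lists the available databases, embedding models, LLMs, and RAG types, the second maps each database/embedding pair to a concrete index name. A minimal sketch of resolving that mapping with the standard json module (the helper name and relative path are illustrative, not part of this commit):

import json

def resolve_index_name(database, embedding, index_data_file='config/index_data.json'):
    # Look up the index name for a database/embedding pair,
    # e.g. ('Pinecone', 'Openai') -> 'pinecone-openai-ams'
    with open(index_data_file) as f:
        index_data = json.load(f)
    return index_data[database][embedding]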
data/AMS/AMS_1996.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3626fd4a0769b8a73a12ee79a1bec7c264c541a5bf90df6f6c13c1ff00011b24
size 152158068
data/AMS/AMS_1997.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:34442bbc794415ea8d778ebd57e1dd368e20c5e6f65aff35fa008af54dbb900a
size 22719877
data/AMS/AMS_1998.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1523ca03cd1254b81cd0cb285182b7ac40208cba7932972ca00e0942e43f3539
size 122280718
data/AMS/AMS_1999.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c631364761565d749e6bafb0ab1e84611e773ccdb640ab08f6b32b1fcc49e1e
size 27850919
data/AMS/AMS_2000.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ddf89c5cd9ddbd225e77198b19274535d4f003fdc20b5823239f51ad48230549
size 24061146
data/AMS/AMS_2001.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c63b2bba5a892759a7298097ee2388f353cc974285a73bfd8635d48af9f7d945
size 23264984
data/AMS/AMS_2002.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b8b60c30ea9843face46e021a80bd1072901596b8e0f98a63601b31ecac2076
size 41615570
data/AMS/AMS_2004.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:986a7f046ba336d35d9d0db974931940543d612dad2c9bb6d5976d778777b659
size 28914300
data/AMS/AMS_2006.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af4fb8e67c1ebf7b51fddd947d531d68ab05ff187fe915528811676ae0083d55
size 61039456
data/AMS/AMS_2008.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d74dcd8ef68ae324f9246a35e1ccf538c4fd676d8b1ae733191c8ad6a055c90
size 31961158
data/AMS/AMS_2010.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:30d9ad0b75d0d41c75926dd97361f1548b79920df61d8d7486978d4b69a00ef6
size 30161812
data/AMS/AMS_2012.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e26a981f74c9d0c3526ad5152c83ad9fabde8f197f69cb24a0fd1d4004c1f026
size 31088140
data/AMS/AMS_2014.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73dea6c8c45d0103404e3e3bd764e6efcd0f5bf5f45d505ce98e6c07528d9322
size 35199422
data/AMS/AMS_2016.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7d8a0e558abd59b94abcbe013f263755f3c525eaf73702662293a3d8b5e2ec5
size 35244294
data/AMS/AMS_2018.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b929f0c6d71116e23d4f52011e82eda07280aabb177300e37419ca38b047c60
size 30251124
data/AMS/AMS_2020.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb6aaaa2cb700bc7d460a1f222756e6a795b629780087a477acd9713982fc0b9
size 45426669
data/AMS/AMS_2022.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ccc90819f501fca9445d415c1ceca8d3991300f8e08724cf7043f1a103aa4231
size 17636761
data/AMS/README.txt
ADDED
@@ -0,0 +1,18 @@
Documents are not uploaded to git. The list of documents which were uploaded to pinecone database AMS:
AMS_1996, https://ntrs.nasa.gov/citations/19960025595
AMS_1997, https://ntrs.nasa.gov/citations/19970021613
AMS_1998, https://ntrs.nasa.gov/citations/19980193156
AMS_1999, https://ntrs.nasa.gov/citations/19990053852
AMS_2000, https://ntrs.nasa.gov/citations/20000048380
AMS_2001, https://ntrs.nasa.gov/citations/20010071164
AMS_2002, https://ntrs.nasa.gov/citations/20020050182
AMS_2004, https://ntrs.nasa.gov/citations/20040084272
AMS_2006, https://ntrs.nasa.gov/citations/20060028221
AMS_2008, https://ntrs.nasa.gov/citations/20080023060
AMS_2010, https://ntrs.nasa.gov/citations/20100021914
AMS_2012, https://ntrs.nasa.gov/citations/20130008824
AMS_2014, https://ntrs.nasa.gov/citations/20140008875
AMS_2016, https://ntrs.nasa.gov/citations/20160004038
AMS_2018, https://ntrs.nasa.gov/citations/20180002828
AMS_2020, https://ntrs.nasa.gov/citations/20205009766
AMS_2022, https://ntrs.nasa.gov/citations/20220006415
data/AMS/ams_data-400-0-50.json
ADDED
The diff for this file is too large to render.
data/AMS/ams_data-400-0.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ef248f60645d1def4d3624351c90cbb5d91554d0a8bfd35615514f4a71a20159
size 18183603
data/AMS/ams_data-5000-0.jsonl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0472930c89ad2c13f997789b070049c99640c6ddcd114cc635110409854435b5
size 17283048
data_import.py
ADDED
@@ -0,0 +1,262 @@
import os
import re
import logging
import shutil
import string

import pinecone
import chromadb

import json, jsonlines
from tqdm import tqdm

from langchain_community.vectorstores import Pinecone
from langchain_community.vectorstores import Chroma

from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter

from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import VoyageEmbeddings

from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document as lancghain_Document

from ragatouille import RAGPretrainedModel

from dotenv import load_dotenv,find_dotenv
load_dotenv(find_dotenv(),override=True)

# Set secrets from environment file
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
VOYAGE_API_KEY=os.getenv('VOYAGE_API_KEY')
PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')
HUGGINGFACEHUB_API_TOKEN=os.getenv('HUGGINGFACEHUB_API_TOKEN')

def chunk_docs(docs,
               chunk_method='tiktoken_recursive',
               file=None,
               chunk_size=500,
               chunk_overlap=0,
               use_json=False):
    docs_out=[]
    if file:
        logging.info('Jsonl file to be used: '+file)
    if use_json and os.path.exists(file):
        logging.info('Jsonl file found, using this instead of parsing docs.')
        with open(file, "r") as file_in:
            file_data = [json.loads(line) for line in file_in]
        # Process the file data and put it into the same format as docs_out
        for line in file_data:
            doc_temp = lancghain_Document(page_content=line['page_content'],
                                          source=line['metadata']['source'],
                                          page=line['metadata']['page'],
                                          metadata=line['metadata'])
            if has_meaningful_content(doc_temp):
                docs_out.append(doc_temp)
        logging.info('Parsed: '+file)
        logging.info('Number of entries: '+str(len(docs_out)))
        logging.info('Sample entries:')
        logging.info(str(docs_out[0]))
        logging.info(str(docs_out[-1]))
    else:
        logging.info('No jsonl found. Reading and parsing docs.')
        logging.info('Chunk size (tokens): '+str(chunk_size))
        logging.info('Chunk overlap (tokens): '+str(chunk_overlap))
        for doc in tqdm(docs,desc='Reading and parsing docs'):
            logging.info('Parsing: '+doc)
            loader = PyPDFLoader(doc)
            data = loader.load_and_split()

            if chunk_method=='tiktoken_recursive':
                text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
            else:
                raise NotImplementedError
            pages = text_splitter.split_documents(data)

            # Tidy up text by removing unnecessary characters
            for page in pages:
                page.metadata['source']=os.path.basename(page.metadata['source'])   # Strip path
                page.metadata['page']=int(page.metadata['page'])+1   # Pages are 0 based, update
                page.page_content=re.sub(r"(\w+)-\n(\w+)", r"\1\2", page.page_content)   # Merge hyphenated words
                page.page_content = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", page.page_content.strip())  # Fix newlines in the middle of sentences
                page.page_content = re.sub(r"\n\s*\n", "\n\n", page.page_content)   # Remove multiple newlines
                # Add metadata to the end of the page content, some RAG models don't have metadata.
                page.page_content += str(page.metadata)
                doc_temp=lancghain_Document(page_content=page.page_content,
                                            source=page.metadata['source'],
                                            page=page.metadata['page'],
                                            metadata=page.metadata)
                if has_meaningful_content(page):
                    docs_out.append(doc_temp)
            logging.info('Parsed: '+doc)
        logging.info('Sample entries:')
        logging.info(str(docs_out[0]))
        logging.info(str(docs_out[-1]))
        if file:
            # Write to a jsonl file, save it.
            logging.info('Writing to jsonl file: '+file)
            with jsonlines.open(file, mode='w') as writer:
                for doc in docs_out:
                    writer.write(doc.dict())
            logging.info('Written: '+file)
    return docs_out
def load_docs(index_type,
              docs,
              query_model,
              index_name=None,
              chunk_method='tiktoken_recursive',
              chunk_size=500,
              chunk_overlap=0,
              clear=False,
              use_json=False,
              file=None,
              batch_size=50):
    """
    Loads PDF documents. If index_name is blank, it will return a list of the data (texts). If it is a name of a pinecone storage, it will return the vector_store.
    """
    # Chunk docs
    docs_out=chunk_docs(docs,
                        chunk_method=chunk_method,
                        file=file,
                        chunk_size=chunk_size,
                        chunk_overlap=chunk_overlap,
                        use_json=use_json)
    # Initialize client
    db_path='../db/'
    if index_name:
        if index_type=="Pinecone":
            # Import and initialize Pinecone client
            pinecone.init(
                api_key=PINECONE_API_KEY
            )
            # Find the existing index, clear for new start
            if clear:
                try:
                    pinecone.describe_index(index_name)
                except:
                    raise Exception(f"Cannot clear index {index_name} because it does not exist.")
                index=pinecone.Index(index_name)
                index.delete(delete_all=True) # Clear the index first, then upload
                logging.info('Cleared database '+index_name)
            # Upsert docs
            try:
                pinecone.describe_index(index_name)
            except:
                logging.info(f"Index {index_name} does not exist. Creating new index.")
                logging.info('Size of embedding used: '+str(embedding_size(query_model)))  # TODO: set this to be backed out of the embedding size
                pinecone.create_index(index_name,dimension=embedding_size(query_model))
                logging.info(f"Index {index_name} created. Adding {len(docs_out)} entries to index.")
                pass
            else:
                logging.info(f"Index {index_name} exists. Adding {len(docs_out)} entries to index.")
            index = pinecone.Index(index_name)
            vectorstore = Pinecone(index, query_model, "page_content") # Set the vector store to calculate embeddings on page_content
            vectorstore = batch_upsert(index_type,
                                       vectorstore,
                                       docs_out,
                                       batch_size=batch_size)
        elif index_type=="ChromaDB":
            # Upsert docs. Defaults to putting this in the ../db directory
            logging.info(f"Creating new index {index_name}.")
            persistent_client = chromadb.PersistentClient(path=db_path+'/chromadb')
            vectorstore = Chroma(client=persistent_client,
                                 collection_name=index_name,
                                 embedding_function=query_model)
            logging.info(f"Index {index_name} created. Adding {len(docs_out)} entries to index.")
            vectorstore = batch_upsert(index_type,
                                       vectorstore,
                                       docs_out,
                                       batch_size=batch_size)
            logging.info("Documents upserted to f{index_name}.")
            # Test query
            test_query = vectorstore.similarity_search('What are examples of aerosapce adhesives to avoid?')
            logging.info('Test query: '+str(test_query))
            if not test_query:
                raise ValueError("Chroma vector database is not configured properly. Test query failed.")
        elif index_type=="RAGatouille":
            logging.info(f'Setting up RAGatouille model {query_model}')
            vectorstore = RAGPretrainedModel.from_pretrained(query_model)
            logging.info('RAGatouille model set: '+str(vectorstore))

            # Create an index from the vectorstore.
            docs_out_colbert = [doc.page_content for doc in docs_out]
            if chunk_size>500:
                raise ValueError("RAGatouille cannot handle chunks larger than 500 tokens. Reduce token count.")
            vectorstore.index(
                collection=docs_out_colbert,
                index_name=index_name,
                max_document_length=chunk_size,
                overwrite_index=True,
                split_documents=True,
            )
            logging.info(f"Index created: {vectorstore}")

            # Move the directory to the db folder
            logging.info(f"Moving RAGatouille index to {db_path}")
            ragatouille_path = os.path.join(db_path, '.ragatouille')
            if os.path.exists(ragatouille_path):
                shutil.rmtree(ragatouille_path)
                logging.info(f"RAGatouille index deleted from {ragatouille_path}")
            shutil.move('./.ragatouille', db_path)
            logging.info(f"RAGatouille index created in {db_path}:"+str(vectorstore))

    # Return vectorstore or docs
    if index_name:
        return vectorstore
    else:
        return docs_out
def delete_index(index_type,index_name):
    """
    Deletes an existing Pinecone index with the given index_name.
    """
    if index_type=="Pinecone":
        # Import and initialize Pinecone client
        pinecone.init(
            api_key=PINECONE_API_KEY
        )
        try:
            pinecone.describe_index(index_name)
            logging.info(f"Index {index_name} exists.")
        except:
            raise Exception(f"Index {index_name} does not exist, cannot delete.")
        else:
            pinecone.delete_index(index_name)
            logging.info(f"Index {index_name} deleted.")
    elif index_type=="ChromaDB":
        # Delete existing collection
        logging.info(f"Deleting index {index_name}.")
        persistent_client = chromadb.PersistentClient(path='../db/chromadb')
        persistent_client.delete_collection(name=index_name)
        logging.info("Index deleted.")
    elif index_type=="RAGatouille":
        raise NotImplementedError
def batch_upsert(index_type,vectorstore,docs_out,batch_size=50):
    # Batch insert the chunks into the vector store
    for i in range(0, len(docs_out), batch_size):
        chunk_batch = docs_out[i:i + batch_size]
        if index_type=="Pinecone":
            vectorstore.add_documents(chunk_batch)
        elif index_type=="ChromaDB":
            vectorstore.add_documents(chunk_batch)  # Happens to be same for chroma/pinecone, leaving if statement just in case
    return vectorstore
def has_meaningful_content(page):
    """
    Test whether the page has more than 30% words and is more than 5 words.
    """
    text=page.page_content
    num_words = len(text.split())
    alphanumeric_pct = sum(c.isalnum() for c in text) / len(text)
    if num_words < 5 or alphanumeric_pct < 0.3:
        return False
    else:
        return True
def embedding_size(embedding_model):
    """
    Returns the embedding size of the model.
    """
    if isinstance(embedding_model,OpenAIEmbeddings):
        return 1536 # https://platform.openai.com/docs/models/embeddings, test-embedding-ada-002
    elif isinstance(embedding_model,VoyageEmbeddings):
        return 1024 # https://docs.voyageai.com/embeddings/, voyage-02
    else:
        raise NotImplementedError
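Taken together, chunk_docs and load_docs above form the ingestion pipeline used by the Document Upload page: chunk PDFs (optionally caching the chunks to a jsonl file), then upsert them into Pinecone, ChromaDB, or a RAGatouille index. A rough usage sketch based only on the signatures above — the data folder, jsonl path, and index name are illustrative values, not prescribed by the commit:

import glob
from langchain_openai import OpenAIEmbeddings
import data_import

docs = glob.glob('../data/AMS/*.pdf')                           # PDFs to ingest
query_model = OpenAIEmbeddings(model='text-embedding-ada-002')  # embedding model for the index

# Chunk only: returns the chunked Documents and, since a file is given, writes the jsonl cache.
chunks = data_import.chunk_docs(docs, file='../data/AMS/ams_data.jsonl',
                                chunk_size=500, chunk_overlap=0)

# Chunk (or reuse the jsonl cache) and upsert into a vector store; returns the vectorstore.
vectorstore = data_import.load_docs('ChromaDB', docs, query_model,
                                    index_name='chromadb-openai-ams',
                                    chunk_size=500, chunk_overlap=0,
                                    use_json=True, file='../data/AMS/ams_data.jsonl')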
pages/1_Chatbot_AMS_Langchain.py
DELETED
@@ -1,152 +0,0 @@
import os
import queries
import pinecone
from dotenv import load_dotenv, find_dotenv
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
import streamlit as st
import openai
import time

from dotenv import load_dotenv,find_dotenv,dotenv_values
load_dotenv(find_dotenv(),override=True)

# Set secrets
# PINECONE_ENVIRONMENT=db.secrets.get('PINECONE_ENVIRONMENT')
# PINECONE_API_KEY=db.secrets.get('PINECONE_API_KEY')
PINECONE_ENVIRONMENT=os.getenv('PINECONE_ENVIRONMENT')
PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')

# Set the page title
st.set_page_config(
    page_title='Aerospace Chatbot: AMS w/Langchain',
)
st.title('Aerospace Mechanisms Chatbot')
with st.expander('''What's under the hood?'''):
    st.markdown('''
    This chatbot will look up from all Aerospace Mechanism Symposia in the following location: https://github.com/dsmueller3760/aerospace_chatbot/tree/main/data/AMS
    * Source code: https://github.com/dsmueller3760/aerospace_chatbot/blob/main/scripts/setup_page_langchain.py
    * Uses custom langchain functions with QA retrieval: https://js.langchain.com/docs/modules/chains/popular/chat_vector_db_legacy
    * All prompts will query entire database unless 'filter response with last received sources' is activated.
    * **Repsonse time ~10 seconds per prompt**.
    ''')
filter_toggle=st.checkbox('Filter response with last received sources?')

# Add a sidebar for input options
st.title('Input')

# Add input fields in the sidebar
st.sidebar.title('Input options')
output_level = st.sidebar.selectbox('Level of Output', ['Concise', 'Detailed'], index=1)
k = st.sidebar.number_input('Number of items per prompt', min_value=1, step=1, value=4)
search_type = st.sidebar.selectbox('Search Type', ['similarity', 'mmr'], index=1)
temperature = st.sidebar.slider('Temperature', min_value=0.0, max_value=2.0, value=0.0, step=0.1)
verbose = st.sidebar.checkbox('Verbose output')
chain_type = st.sidebar.selectbox('Chain Type', ['stuff', 'map_reduce'], index=0)

# Vector databases
st.sidebar.title('Vector database')
index_type=st.sidebar.selectbox('Index type', ['Pinecone'], index=0)
index_name=st.sidebar.selectbox('Index name', ['canopy--ams'], index=0)

# Embeddings
st.sidebar.title('Embeddings')
embedding_type=st.sidebar.selectbox('Embedding type', ['Openai'], index=0)
embedding_name=st.sidebar.selectbox('Embedding name', ['text-embedding-ada-002'], index=0)

# Add a section for secret keys
st.sidebar.title('Secret keys')
OPENAI_API_KEY = st.sidebar.text_input('OpenAI API Key', type='password')

# Pinecone
pinecone.init(
    api_key=PINECONE_API_KEY,
    environment=PINECONE_ENVIRONMENT
)

if OPENAI_API_KEY:
    openai.api_key = OPENAI_API_KEY
    embeddings_model = OpenAIEmbeddings(model=embedding_name,openai_api_key=OPENAI_API_KEY)

    # Set up chat history
    qa_model_obj = st.session_state.get('qa_model_obj',[])
    message_id = st.session_state.get('message_id', 0)

    if 'messages' not in st.session_state:
        st.session_state.messages = []
    for message in st.session_state.messages:
        with st.chat_message(message['role']):
            st.markdown(message['content'])

    # Process some items
    if output_level == 'Concise':
        out_token = 50
    else:
        out_token = 516

    # Define LLM parameters and qa model object
    llm = OpenAI(temperature=temperature,
                 openai_api_key=OPENAI_API_KEY,
                 max_tokens=out_token)
    qa_model_obj=queries.QA_Model(index_name,
                                  embeddings_model,
                                  llm,
                                  k,
                                  search_type,
                                  verbose,
                                  filter_arg=False)

    # Display assistant response in chat message container
    if prompt := st.chat_input('Prompt here'):
        st.session_state.messages.append({'role': 'user', 'content': prompt})
        with st.chat_message('user'):
            st.markdown(prompt)
        with st.chat_message('assistant'):
            message_placeholder = st.empty()

            with st.status('Generating response...') as status:
                t_start=time.time()

                # Process some items
                if output_level == 'Concise':
                    out_token = 50
                else:
                    out_token = 516

                # Define LLM parameters and qa model object
                llm = OpenAI(temperature=temperature,
                             openai_api_key=OPENAI_API_KEY,
                             max_tokens=out_token)

                message_id += 1
                st.write('Message: '+str(message_id))

                if message_id>1:
                    qa_model_obj=st.session_state['qa_model_obj']
                    qa_model_obj.update_model(llm,
                                              k=k,
                                              search_type=search_type,
                                              verbose=verbose,
                                              filter_arg=filter_toggle)
                    if filter_toggle:
                        filter_list = list(set(item['source'] for item in qa_model_obj.sources[-1]))
                        filter_items=[]
                        for item in filter_list:
                            filter_item={'source': item}
                            filter_items.append(filter_item)
                        filter={'$or':filter_items}

                st.write('Searching vector database, generating prompt...')
                qa_model_obj.query_docs(prompt)
                ai_response=qa_model_obj.result['answer']
                message_placeholder.markdown(ai_response)
                t_delta=time.time() - t_start
                status.update(label='Prompt generated in '+"{:10.3f}".format(t_delta)+' seconds', state='complete', expanded=False)

        st.session_state['qa_model_obj'] = qa_model_obj
        st.session_state['message_id'] = message_id
        st.session_state.messages.append({'role': 'assistant', 'content': ai_response})

else:
    st.warning('No API key found. Add your API key in the sidebar under Secret Keys. Find it or create one here: https://platform.openai.com/api-keys')
    st.info('Your API-key is not stored in any form by this app. However, for transparency it is recommended to delete your API key once used.')
pages/1_Chatbot_AMS_Modular.py
ADDED
@@ -0,0 +1,158 @@
import queries, setup

import os
import time
import logging
import json

import pinecone
import openai

from langchain_community.vectorstores import Pinecone
from langchain_community.vectorstores import Chroma

from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import VoyageEmbeddings

from langchain_openai import OpenAI, ChatOpenAI
from langchain_community.llms import HuggingFaceHub

from ragatouille import RAGPretrainedModel

import streamlit as st

# Set up the page, enable logging
from dotenv import load_dotenv,find_dotenv
load_dotenv(find_dotenv(),override=True)
logging.basicConfig(filename='app_1_chatbot_ams_modular.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)

# Set the page title
st.set_page_config(
    page_title='Aerospace Chatbot: Modular',
)
st.title('Aerospace Mechanisms Chatbot')
with st.expander('''What's under the hood?'''):
    st.markdown('''
    This chatbot will look up from all Aerospace Mechanism Symposia in the following location: https://github.com/dsmueller3760/aerospace_chatbot/tree/main/data/AMS
    Example questions:
    * What are examples of latch failures which have occurred due to improper fitup?
    * What are examples of lubricants which should be avoided for space mechanism applications?
    ''')
filter_toggle=st.checkbox('Filter response with last received sources?')

sb=setup.load_sidebar(config_file='../config/config.json',
                      index_data_file='../config/index_data.json',
                      vector_databases=True,
                      embeddings=True,
                      rag_type=True,
                      index_name=True,
                      llm=True,
                      model_options=True,
                      secret_keys=True)

secrets=setup.set_secrets(sb) # Take secrets from .env file first, otherwise from sidebar

# Set up chat history
if 'qa_model_obj' not in st.session_state:
    st.session_state.qa_model_obj = []
if 'message_id' not in st.session_state:
    st.session_state.message_id = 0
if 'messages' not in st.session_state:
    st.session_state.messages = []
for message in st.session_state.messages:
    with st.chat_message(message['role']):
        st.markdown(message['content'])

# Define chat
if prompt := st.chat_input('Prompt here'):
    # User prompt
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    with st.chat_message('user'):
        st.markdown(prompt)
    # Assistant response
    with st.chat_message('assistant'):
        message_placeholder = st.empty()

        with st.status('Generating response...') as status:
            t_start=time.time()

            st.session_state.message_id += 1
            st.write('Starting reponse generation for message: '+str(st.session_state.message_id))
            logging.info('Starting reponse generation for message: '+str(st.session_state.message_id))

            # Process some items
            if sb['model_options']['output_level'] == 'Concise':
                out_token = 50
            else:
                out_token = 516
            logging.info('Output tokens: '+str(out_token))

            if st.session_state.message_id==1:
                # Define embeddings
                if sb['query_model']=='Openai':
                    query_model=OpenAIEmbeddings(model=sb['embedding_name'],openai_api_key=secrets['OPENAI_API_KEY'])
                elif sb['query_model']=='Voyage':
                    query_model=VoyageEmbeddings(model=sb['embedding_name'],voyage_api_key=secrets['VOYAGE_API_KEY'])
                elif sb['index_type']=='RAGatouille':
                    query_model=RAGPretrainedModel.from_index('../db/.ragatouille/colbert/indexes/'+sb['index_name'])
                logging.info('Query model set: '+str(query_model))

                # Define LLM
                if sb['llm_source']=='OpenAI':
                    llm = ChatOpenAI(model_name=sb['llm_model'],
                                     temperature=sb['model_options']['temperature'],
                                     openai_api_key=secrets['OPENAI_API_KEY'],
                                     max_tokens=out_token)
                elif sb['llm_source']=='Hugging Face':
                    llm = HuggingFaceHub(repo_id=sb['llm_model'],
                                         model_kwargs={"temperature": sb['model_options']['temperature'], "max_length": out_token})
                logging.info('LLM model set: '+str(llm))

                # Initialize QA model object
                if 'search_type' in sb['model_options']:
                    search_type=sb['model_options']['search_type']
                else:
                    search_type=None
                st.session_state.qa_model_obj=queries.QA_Model(sb['index_type'],
                                                               sb['index_name'],
                                                               query_model,
                                                               llm,
                                                               k=sb['model_options']['k'],
                                                               search_type=search_type,
                                                               filter_arg=False)
                logging.info('QA model object set: '+str(st.session_state.qa_model_obj))
            if st.session_state.message_id>1:
                logging.info('Updating model with sidebar settings...')
                # Update LLM
                if sb['llm_source']=='OpenAI':
                    llm = ChatOpenAI(model_name=sb['llm_model'],
                                     temperature=sb['model_options']['temperature'],
                                     openai_api_key=secrets['OPENAI_API_KEY'],
                                     max_tokens=out_token)
                elif sb['llm_source']=='Hugging Face':
                    llm = HuggingFaceHub(repo_id=sb['llm_model'],
                                         model_kwargs={"temperature": sb['model_options']['temperature'], "max_length": out_token})
                logging.info('LLM model set: '+str(llm))

                st.session_state.qa_model_obj.update_model(llm,
                                                           k=sb['model_options']['k'],
                                                           search_type=sb['model_options']['search_type'],
                                                           filter_arg=filter_toggle)
                logging.info('QA model object updated: '+str(st.session_state.qa_model_obj))

            st.write('Searching vector database, generating prompt...')
            logging.info('Searching vector database, generating prompt...')
            st.session_state.qa_model_obj.query_docs(prompt)
            ai_response=st.session_state.qa_model_obj.result['answer'].content
            message_placeholder.markdown(ai_response)
            t_delta=time.time() - t_start
            status.update(label='Prompt generated in '+"{:10.3f}".format(t_delta)+' seconds', state='complete', expanded=False)

    st.session_state.messages.append({'role': 'assistant', 'content': ai_response})
    logging.info(f'Messaging complete for {st.session_state.message_id}.')

# Add reset button
if st.button('Restart session'):
    st.session_state.qa_model_obj = []
    st.session_state.message_id = 0
    st.session_state.messages = []
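The page above leans on queries.QA_Model, and the updated queries.py (+236/-113 in the file list) is not rendered in this diff. As a hedged sketch only, the interface the page assumes looks roughly like the following — the method and attribute names are taken from the calls visible above, while the bodies are placeholders rather than the repo's implementation:

class QA_Model:
    def __init__(self, index_type, index_name, query_model, llm,
                 k=None, search_type=None, filter_arg=False):
        # Store retrieval settings and connect to the index named in index_data.json.
        ...

    def update_model(self, llm, k=None, search_type=None, filter_arg=False):
        # Swap in a new LLM and retrieval settings between messages.
        ...

    def query_docs(self, prompt):
        # Run retrieval plus generation; the page then reads self.result['answer'].
        self.result = {'answer': None}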
pages/2_Chatbot_AMS_Canopy.py
DELETED
@@ -1,157 +0,0 @@
import os
import queries
import pinecone
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
import streamlit as st
import openai
import time

from tqdm.auto import tqdm
from typing import Tuple

# from dotenv import load_dotenv,find_dotenv,dotenv_values
# load_dotenv(find_dotenv(),override=True)

from canopy.tokenizer import Tokenizer
from canopy.knowledge_base import KnowledgeBase
from canopy.context_engine import ContextEngine
from canopy.chat_engine import ChatEngine
from canopy.llm.openai import OpenAILLM
# from canopy.llm.models import ModelParams
from canopy.models.data_models import Document, Messages, UserMessage, AssistantMessage
from canopy.models.api_models import ChatResponse

def chat(new_message: str, history: Messages) -> Tuple[str, Messages, ChatResponse]:
    messages = history + [UserMessage(content=new_message)]
    response = chat_engine.chat(messages)
    assistant_response = response.choices[0].message.content
    return assistant_response, messages + [AssistantMessage(content=assistant_response)], response

# Set secrets
# PINECONE_ENVIRONMENT=db.secrets.get('PINECONE_ENVIRONMENT')
# PINECONE_API_KEY=db.secrets.get('PINECONE_API_KEY')
PINECONE_ENVIRONMENT=os.getenv('PINECONE_ENVIRONMENT')
PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')

# Set the page title
st.set_page_config(
    page_title='Aerospace Chatbot: AMS w/Langchain',
)
st.title('Aerospace Mechanisms Chatbot')
with st.expander('''What's under the hood?'''):
    st.markdown('''
    This chatbot will look up from all Aerospace Mechanism Symposia in the following location: https://github.com/dsmueller3760/aerospace_chatbot/tree/main/data/AMS
    * Source code: https://github.com/dsmueller3760/aerospace_chatbot/blob/main/scripts/setup_page_canopy.py
    * Uses pinecone canopy: https://www.pinecone.io/blog/canopy-rag-framework/
    * **Response time ~45 seconds per prompt**
    ''')

# Add a sidebar for input options
st.title('Input')
st.sidebar.title('Input Options')

# Add input fields in the sidebar
model_name=st.sidebar.selectbox('Model', ['gpt-3.5-turbo''gpt-3.5-turbo-16k','gpt-3.5-turbo','gpt-3.5-turbo-1106','gpt-4','gpt-4-32k'], index=1)
model_list={'gpt-3.5-turbo':4096,
            'gpt-3.5-turbo-16k':16385,
            'gpt-3.5-turbo-1106':16385,
            'gpt-4':8192,
            'gpt-4-32k':32768}
temperature = st.sidebar.slider('Temperature', min_value=0.0, max_value=2.0, value=0.0, step=0.1)
n=None # Not used. How many chat completion choices to generate for each input message.
top_p=None # Not used. Only use this or temperature. Where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

k=st.sidebar.number_input('Number document chunks per query', min_value=1, step=1, value=15)
output_level=st.sidebar.selectbox('Level of Output', ['Concise', 'Detailed', 'No Limit'], index=2)
max_prompt_tokens=model_list[model_name]

# Vector databases
st.sidebar.title('Vector Database')
index_name=st.sidebar.selectbox('Index name', ['canopy--ams'], index=0)

# Embeddings
st.sidebar.title('Embeddings')
embedding_type=st.sidebar.selectbox('Embedding type', ['Openai'], index=0)
embedding_name=st.sidebar.selectbox('Embedding name', ['text-embedding-ada-002'], index=0)

# Add a section for secret keys
st.sidebar.title('Secret Keys')
OPENAI_API_KEY = st.sidebar.text_input('OpenAI API Key', type='password')



if OPENAI_API_KEY:
    openai.api_key = OPENAI_API_KEY
    embeddings_model = OpenAIEmbeddings(model=embedding_name,openai_api_key=OPENAI_API_KEY)

    # Set up chat history
    qa_model_obj = st.session_state.get('qa_model_obj',[])
    message_id = st.session_state.get('message_id', 0)
    history = st.session_state.get('history',[])

    if 'messages' not in st.session_state:
        st.session_state.messages = []
    for message in st.session_state.messages:
        with st.chat_message(message['role']):
            st.markdown(message['content'])

    # Process some items
    if output_level == 'Concise':
        out_token = 50
    else:
        out_token = 516

    # Display assistant response in chat message container
    if prompt := st.chat_input('Prompt here'):
        st.session_state.messages.append({'role': 'user', 'content': prompt})
        with st.chat_message('user'):
            st.markdown(prompt)
        with st.chat_message('assistant'):
            message_placeholder = st.empty()

            with st.status('Generating response...') as status:
                t_start=time.time()
                message_id += 1
                st.write('Message: '+str(message_id))

                # Process some items
                if output_level == 'Concise':
                    max_generated_tokens = 50
                elif output_level == 'Detailed':
                    max_generated_tokens = 516
                else:
                    max_generated_tokens = None

                # Inialize canopy
                Tokenizer.initialize()
                pinecone.init(
                    api_key=PINECONE_API_KEY,
                    environment=PINECONE_ENVIRONMENT
                )

                kb = KnowledgeBase(index_name=index_name,
                                   default_top_k=k)
                kb.connect()
                context_engine = ContextEngine(kb)
                llm=OpenAILLM(model_name=model_name)
                chat_engine = ChatEngine(context_engine,
                                         llm=llm,
                                         max_generated_tokens=max_generated_tokens,
                                         max_prompt_tokens=max_prompt_tokens)

                st.write('Searching vector database, generating prompt...')
                response, history, chat_response = chat(prompt, history)

                message_placeholder.markdown(response)
                t_delta=time.time() - t_start
                status.update(label='Prompt generated in '+"{:10.3f}".format(t_delta)+' seconds', state='complete', expanded=False)

        st.session_state['history'] = history
        st.session_state['qa_model_obj'] = qa_model_obj
        st.session_state['message_id'] = message_id
        st.session_state.messages.append({'role': 'assistant', 'content': response})

else:
    st.warning('No API key found. Add your API key in the sidebar under Secret Keys. Find it or create one here: https://platform.openai.com/api-keys')
    st.info('Your API-key is not stored in any form by this app. However, for transparency it is recommended to delete your API key once used.')
pages/2_Document_Upload.py
ADDED
@@ -0,0 +1,109 @@
import data_import, setup

import os
import time
import logging
import glob

from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import VoyageEmbeddings

from ragatouille import RAGPretrainedModel

import streamlit as st

# Set up the page, enable logging
from dotenv import load_dotenv,find_dotenv
load_dotenv(find_dotenv(),override=True)
logging.basicConfig(filename='app_2_document_upload.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)

# Set the page title
st.set_page_config(
    page_title='Upload PDFs',
)
st.title('Upload PDFs')

sb=setup.load_sidebar(config_file='../config/config.json',
                      index_data_file='../config/index_data.json',
                      vector_databases=True,
                      embeddings=True,
                      index_name=True,
                      secret_keys=True)

secrets=setup.set_secrets(sb) # Take secrets from .env file first, otherwise from sidebar

# Populate the main screen
logging.info(f'index_type test, {sb["index_type"]}')

if sb["index_type"]=='RAGatouille':
    logging.info('Set hugging face model for queries.')
    query_model=sb['query_model']
elif sb['query_model']=='Openai' or 'Voyage':
    logging.info('Set embeddings model for queries.')
    if sb['query_model']=='Openai':
        query_model=OpenAIEmbeddings(model=sb['embedding_name'],openai_api_key=secrets['OPENAI_API_KEY'])
    elif sb['query_model']=='Voyage':
        query_model=VoyageEmbeddings(voyage_api_key=secrets['VOYAGE_API_KEY'])
logging.info('Query model set: '+str(query_model))

# Find docs
index_name_md=st.markdown('Enter a directory relative to the current directory, or an absolute path.')
data_folder = st.text_input('Enter a directory','../data/AMS/')
if not os.path.isdir(data_folder):
    st.error('The entered directory does not exist')
docs = glob.glob(data_folder+'*.pdf')   # Only get the PDFs in the directory
st.markdown('PDFs found: '+str(docs))
st.markdown('Number of PDFs found: ' + str(len(docs)))
logging.info('Docs: '+str(docs))

# Add an expandable box for options
with st.expander("Options"):
    use_json = st.checkbox('Use existing jsonl, if available (will ignore chunk method, size, and overlap)?', value=True)
    json_file=st.text_input('Jsonl file',data_folder+'ams_data.jsonl')
    clear_database = st.checkbox('Clear existing database?')
    chunk_method= st.selectbox('Chunk method', ['tiktoken_recursive'], index=0)
    if sb['query_model']=='Openai' or 'ChromaDB':
        # OpenAI will time out if the batch size is too large
        batch_size=st.number_input('Batch size for upsert', min_value=1, step=1, value=100)
    else:
        batch_size=None
    if chunk_method=='tiktoken_recursive':
        chunk_size=st.number_input('Chunk size (tokens)', min_value=1, step=1, value=500)
        chunk_overlap=st.number_input('Chunk overlap (tokens)', min_value=0, step=1, value=0)
    else:
        raise NotImplementedError

# Add a button to run the function
if st.button('Chunk docs to jsonl file'):
    start_time = time.time()  # Start the timer
    data_import.chunk_docs(docs,
                           file=json_file,
                           chunk_method=chunk_method,
                           chunk_size=chunk_size,
                           chunk_overlap=chunk_overlap,
                           use_json=False)
    end_time = time.time()  # Stop the timer
    elapsed_time = end_time - start_time
    st.write(f"Elapsed Time: {elapsed_time:.2f} seconds")
if st.button('Load docs into vector database'):
    start_time = time.time()  # Start the timer
    data_import.load_docs(sb['index_type'],
                          docs,
                          query_model=query_model,
                          index_name=sb['index_name'],
                          chunk_size=chunk_size,
                          chunk_overlap=chunk_overlap,
                          use_json=use_json,
                          clear=clear_database,
                          file=json_file,
                          batch_size=batch_size)
    end_time = time.time()  # Stop the timer
    elapsed_time = end_time - start_time
    st.write(f"Elapsed Time: {elapsed_time:.2f} seconds")
# Add a button to delete the index
if st.button('Delete existing index'):
    start_time = time.time()  # Start the timer
    data_import.delete_index(sb['index_type'],sb['index_name'])
    end_time = time.time()  # Stop the timer
    elapsed_time = end_time - start_time
    st.write(f"Elapsed Time: {elapsed_time:.2f} seconds")
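The "Use existing jsonl" option above reuses the chunk cache written by data_import.chunk_docs, where each line is one chunk serialized via Document.dict(). As a reference sketch with illustrative values (only the field names come from the code above):

# One jsonl record, as consumed by data_import.chunk_docs when use_json=True.
record = {
    "page_content": "Figure 3 shows the latch assembly ... {'source': 'AMS_2002.pdf', 'page': 12}",
    "metadata": {"source": "AMS_2002.pdf", "page": 12},
}
# chunk_docs reads record['page_content'], record['metadata']['source'],
# and record['metadata']['page'] from each line of the file.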
pages/3_Visualize_Data.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import setup

import time
import logging
from datetime import datetime

from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import VoyageEmbeddings

from ragxplorer import RAGxplorer

import streamlit as st

# Set up the page, enable logging
from dotenv import load_dotenv,find_dotenv
load_dotenv(find_dotenv(),override=True)
logging.basicConfig(filename='app_3_visualize_data.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)

# Set the page title
st.set_page_config(
    page_title='Visualize Data',
    layout='wide'
)
st.title('Visualize Data')

sb=setup.load_sidebar(config_file='../config/config.json',
                      index_data_file='../config/index_data.json',
                      vector_databases=True,
                      embeddings=True,
                      index_name=True,
                      secret_keys=True)
secrets=setup.set_secrets(sb) # Take secrets from .env file first, otherwise from sidebar

# Set up session state variables
if 'client' not in st.session_state:
    st.session_state.client = None

# Populate the main screen
logging.info(f'index_type test, {sb["index_type"]}')

if sb["index_type"]=='RAGatouille':
    raise Exception('Only index type ChromaDB is supported for this function.')
elif sb["index_type"]=='Pinecone':
    raise Exception('Only index type ChromaDB is supported for this function.')
elif sb['query_model']=='Openai' or sb['query_model']=='Voyage':
    logging.info('Set embeddings model for queries.')
    if sb['query_model']=='Openai':
        query_model=OpenAIEmbeddings(model=sb['embedding_name'],openai_api_key=secrets['OPENAI_API_KEY'])
    elif sb['query_model']=='Voyage':
        query_model=VoyageEmbeddings(voyage_api_key=secrets['VOYAGE_API_KEY'])
    logging.info('Query model set: '+str(query_model))

st.info('You must have created a database using Document Upload in ChromaDB for this to work.')

# Add an expander with a description of what's going on.
with st.expander("Under the hood",expanded=True):
    st.markdown('''
    Uses modified version of https://github.com/gabrielchua/RAGxplorer/tree/main?tab=readme-ov-file to connect to existing database created.
    Assumes that chroma databases are located in ../db/chroma
    Query size in database: Take a random sample of this size from the database to visualize.
    ''')

with st.expander("Create visualization data",expanded=True):
    # Add a button to run the function
    vector_qty=st.number_input('Query size in database', min_value=1, step=10, value=50)
    export_df = st.checkbox('Export visualization data?', value=True)
    if export_df:
        current_time = datetime.now().strftime("%Y.%m.%d.%H.%M")
        df_export_path = st.text_input('Export file', f'../data/AMS/ams_data-400-0-{vector_qty}.json')
    if st.button('Create visualization data'):
        start_time = time.time() # Start the timer

        st.session_state.client = RAGxplorer(embedding_model=sb['embedding_name'])
        st.session_state.client.load_db(path_to_db='../db/chromadb/',index_name=sb['index_name'],
                                        df_export_path=df_export_path,
                                        vector_qty=vector_qty,
                                        verbose=True)

        end_time = time.time() # Stop the timer
        elapsed_time = end_time - start_time
        st.write(f"Elapsed Time: {elapsed_time:.2f} seconds")

with st.expander("Visualize data",expanded=True):
    import_data = st.checkbox('Import visualization data?', value=True)
    if import_data:
        import_file = st.file_uploader("Import file", type="json")
        if import_file is None:
            # Use a default file
            import_file_path=st.text_input('Import file',df_export_path)
        else:
            # Use the uploaded file
            import_file_path=st.text_input('Import file',f'../data/AMS/{import_file.name}')
    else:
        import_file_path=None

    query = st.text_input('Query', 'What are examples of lubricants which should be avoided for space mechanism applications?')

    if st.button('Visualize data'):
        start_time = time.time() # Start the timer

        if st.session_state.client is None:
            st.session_state.client = RAGxplorer(embedding_model=sb['embedding_name'])

        fig = st.session_state.client.visualize_query(query,
                                                      path_to_db='../db/chromadb/', viz_data_df_path=import_file_path,
                                                      verbose=True)
        st.plotly_chart(fig,use_container_width=True)

        end_time = time.time() # Stop the timer
        elapsed_time = end_time - start_time
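For reference, a minimal headless sketch of the same visualization flow, using only the RAGxplorer calls this page makes (the modified fork noted in the page itself is assumed); the index name and export path are placeholders.

# Hypothetical non-Streamlit sketch of the RAGxplorer flow above (placeholder names, modified fork assumed).
from ragxplorer import RAGxplorer

client = RAGxplorer(embedding_model='text-embedding-ada-002')
client.load_db(path_to_db='../db/chromadb/', index_name='ams-chromadb-openai',  # assumed index name
               df_export_path='../data/AMS/viz_data.json',                       # assumed export path
               vector_qty=50,
               verbose=True)
fig = client.visualize_query('What are examples of lubricants which should be avoided for space mechanism applications?',
                             path_to_db='../db/chromadb/',
                             viz_data_df_path='../data/AMS/viz_data.json',
                             verbose=True)
fig.show()  # Plotly figure; assumed to be displayable outside Streamlit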
prompts.py
CHANGED
@@ -1,69 +1,12 @@
-from langchain.prompts.prompt import PromptTemplate
 from langchain import hub
+from langchain.prompts.prompt import PromptTemplate
 
-#
-# ----------------
-# Your name is Aerospace Chatbot. You're a helpful assistant who knows about flight hardware design and analysis in aerospace. If you don't know the answer, just say that you don't know, don't try to make up an answer.
-# Include sources from the chat history in the standalone question created.
-# ----------------
-
-# Chat History:
-# {chat_history}
-# User Question: {question}
-# Standalone Question:"""
+# Prompts on the hub: https://smith.langchain.com/hub/my-prompts?organizationId=45eb8917-7353-4296-978d-bb461fc45c65
 CONDENSE_QUESTION_PROMPT = hub.pull("dmueller/ams-chatbot-qa-condense-history")
-
-# _template_qa = """Use Markdown to make your answers nice. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.
-# ----------------
-# Your name is Aerospace Chatbot. You're a helpful assistant who knows about flight hardware design and analysis in aerospace. If you don't know the answer, just say that you don't know, don't try to make up an answer.
-# ----------------
-
-# Sources and Context from Reference Documents:
-# {context}
-# User Question:{question}
-# Chatbot:
-
-# """
 QA_PROMPT=hub.pull("dmueller/ams-chatbot-qa-retrieval")
-
-# _template_qa_wsources="""Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
-# ----------------
-# Your name is Aerospace Chatbot. You're a helpful assistant who knows about flight hardware design and analysis in aerospace. If you don't know the answer, just say that you don't know, don't try to make up an answer.
-# ----------------
-# If you don't know the answer, just say that you don't know. Don't try to make up an answer.
-# ALWAYS return a "SOURCES" part in your answer.
-
-# QUESTION: Which state/country's law governs the interpretation of the contract?
-# =========
-# Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.
-# Source: 28-pl
-# Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.
-# Source: 30-pl
-# Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,
-# Source: 4-pl
-# =========
-# FINAL ANSWER: This Agreement is governed by English law.
-# SOURCES: 28-pl
-
-# QUESTION: What did the president say about Michael Jackson?
-# =========
-# Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
-# Source: 0-pl
-# Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
-# Source: 24-pl
-# Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.
-# Source: 5-pl
-# Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
-# Source: 34-pl
-# =========
-# FINAL ANSWER: The president did not mention Michael Jackson.
-# SOURCES:
-
-# QUESTION: {question}
-# =========
-# {summaries}
-# =========
-# FINAL ANSWER:"""
 QA_WSOURCES_PROMPT=hub.pull("dmueller/ams-chatbot-qa-retrieval-wsources")
-
 QA_GENERATE_PROMPT=hub.pull("dmueller/generate_qa_prompt")
+
+# Prompts defined here only
+DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
+TEST_QUERY_PROMPT='What are examples of adhesives to use when potting motors for launch vehicle or spacecraft mechanisms?'
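For reference, a small illustrative sketch of how these prompt objects behave: the hub prompts are pulled at import time and piped directly into the LCEL chain in queries.py, while DEFAULT_DOCUMENT_PROMPT is a plain PromptTemplate that passes page_content through unchanged.

# Illustrative only; shows the PromptTemplate defined above, not the hub prompts' contents.
from langchain.prompts.prompt import PromptTemplate

doc_prompt = PromptTemplate.from_template(template="{page_content}")
print(doc_prompt.format(page_content="Example chunk text"))  # -> "Example chunk text"
# The hub.pull(...) prompts are used the same way inside the chain, e.g. ... | CONDENSE_QUESTION_PROMPT | llm.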
queries.py
CHANGED
@@ -1,145 +1,268 @@
-"""
-from dotenv import load_dotenv, find_dotenv
-from langchain.vectorstores import Pinecone
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.llms import OpenAI
-from
-from langchain.chains import ConversationalRetrievalChain
-from langchain.memory import ConversationBufferMemory
-from langchain.chains.llm import LLMChain
-import
+import os
+import logging
+import re
+
+from dotenv import load_dotenv, find_dotenv
+
+import openai
 import pinecone
-from
+import chromadb
+
+from langchain_community.vectorstores import Pinecone
+from langchain_community.vectorstores import Chroma
+
+from langchain.memory import ConversationBufferMemory
+
+from operator import itemgetter
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.runnables import RunnableLambda, RunnablePassthrough
+from langchain.schema import format_document
+from langchain_core.messages import get_buffer_string
+
+from prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT, DEFAULT_DOCUMENT_PROMPT, TEST_QUERY_PROMPT
+
+# Set secrets from environment file
+OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
+VOYAGE_API_KEY=os.getenv('VOYAGE_API_KEY')
+PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')
+HUGGINGFACEHUB_API_TOKEN=os.getenv('HUGGINGFACEHUB_API_TOKEN')
+
+# Class and functions
 class QA_Model:
     def __init__(self,
+                 index_type,
                  index_name,
+                 query_model,
                  llm,
                  k=6,
                  search_type='similarity',
+                 fetch_k=50,
                  temperature=0,
-                 verbose=False,
                  chain_type='stuff',
                  filter_arg=False):
 
-        self.
-        self.
+        self.index_type=index_type
+        self.index_name=index_name
+        self.query_model=query_model
         self.llm=llm
-        self.k
-        self.search_type
-        self.
-        self.
-        self.chain_type
-        self.filter_arg
+        self.k=k
+        self.search_type=search_type
+        self.fetch_k=fetch_k
+        self.temperature=temperature
+        self.chain_type=chain_type
+        self.filter_arg=filter_arg
+        self.sources=[]
 
         load_dotenv(find_dotenv(),override=True)
 
+        # Define retriever search parameters
+        search_kwargs = _process_retriever_args(self.filter_arg,
+                                                self.sources,
+                                                self.search_type,
+                                                self.k,
+                                                self.fetch_k)
+
         # Read in from the vector database
-        self.chat_history=ConversationBufferMemory(memory_key='chat_history',
-                                                   input_key='question',
-                                                   output_key='answer',
-                                                   return_messages=True)
-
-        # Implement filter
-        if filter_arg:
-            filter_list = list(set(item["source"] for item in self.sources[-1]))
-            filter_items=[]
-            for item in filter_list:
-                filter_item={"source": item}
-                filter_items.append(filter_item)
-            filter={"$or":filter_items}
-        else:
-            filter=None
+        if index_type=='Pinecone':
+            pinecone.init(
+                api_key=PINECONE_API_KEY
+            )
+            logging.info('Chat pinecone index name: '+str(index_name))
+            logging.info('Chat query model: '+str(query_model))
+            index = pinecone.Index(index_name)
+            self.vectorstore = Pinecone(index,query_model,'page_content')
+            logging.info('Chat vectorstore: '+str(self.vectorstore))
+
+            # Test query
+            test_query = self.vectorstore.similarity_search(TEST_QUERY_PROMPT)
+            logging.info('Test query: '+str(test_query))
+            if not test_query:
+                raise ValueError("Pinecone vector database is not configured properly. Test query failed.")
+            else:
+                logging.info('Test query succeeded!')
+
+            self.retriever=self.vectorstore.as_retriever(search_type=search_type,
+                                                         search_kwargs=search_kwargs)
+            logging.info('Chat retriever: '+str(self.retriever))
+        elif index_type=='ChromaDB':
+            logging.info('Chat chroma index name: '+str(index_name))
+            logging.info('Chat query model: '+str(query_model))
+            persistent_client = chromadb.PersistentClient(path='../db/chromadb')
+            self.vectorstore = Chroma(client=persistent_client,
+                                      collection_name=index_name,
+                                      embedding_function=query_model)
+            logging.info('Chat vectorstore: '+str(self.vectorstore))
+
+            # Test query
+            test_query = self.vectorstore.similarity_search(TEST_QUERY_PROMPT)
+            logging.info('Test query: '+str(test_query))
+            if not test_query:
+                raise ValueError("Chroma vector database is not configured properly. Test query failed.")
+            else:
+                logging.info('Test query succeeded!')
+
+            self.retriever=self.vectorstore.as_retriever(search_type=search_type,
+                                                         search_kwargs=search_kwargs)
+            logging.info('Chat retriever: '+str(self.retriever))
+        elif index_type=='RAGatouille':
+            # Easy because the index is picked up directly.
+            self.vectorstore=query_model
+            logging.info('Chat query model:'+str(query_model))
+
+            # Test query
+            test_query = self.vectorstore.search(TEST_QUERY_PROMPT)
+            logging.info('Test query: '+str(test_query))
+            if not test_query:
+                raise ValueError("RAGatouille vector database is not configured properly. Test query failed.")
+            else:
+                logging.info('Test query succeeded!')
+
+            self.retriever=self.vectorstore.as_langchain_retriever()
+            logging.info('Chat retriever: '+str(self.retriever))
+
+        # Initialize memory
+        self.memory = ConversationBufferMemory(
+            return_messages=True, output_key='answer', input_key='question')
+        logging.info('Memory: '+str(self.memory))
+
+        # Assemble main chain
+        self.conversational_qa_chain=_define_qa_chain(self.llm,
+                                                      self.retriever,
+                                                      self.memory,
+                                                      self.search_type,
+                                                      search_kwargs)
+    def query_docs(self,query):
+        self.memory.load_memory_variables({})
+        logging.info('Memory content before qa result: '+str(self.memory))
+
+        logging.info('Query: '+str(query))
+        self.result = self.conversational_qa_chain.invoke({'question': query})
+        logging.info('QA result: '+str(self.result))
+
+        if self.index_type!='RAGatouille':
+            self.sources = '\n'.join(str(data.metadata) for data in self.result['references'])
+            self.result['answer'].content += '\nSources: \n'+self.sources
+            logging.info('Sources: '+str(self.sources))
+            logging.info('Response with sources: '+str(self.result['answer'].content))
+        else:
+            # RAGatouille doesn't have metadata, need to extract from context first.
+            extracted_metadata = []
+            pattern = r'\{([^}]*)\}(?=[^{}]*$)' # Regular expression pattern to match the last curly braces
+
+            for ref in self.result['references']:
+                match = re.search(pattern, ref.page_content)
+                if match:
+                    extracted_metadata.append("{"+match.group(1)+"}")
+            self.sources = '\n'.join(extracted_metadata)
+            self.result['answer'].content += '\nSources: \n'+self.sources
+            logging.info('Sources: '+str(self.sources))
+            logging.info('Response with sources: '+str(self.result['answer'].content))
+
+        self.memory.save_context({'question': query}, {'answer': self.result['answer'].content})
+        logging.info('Memory content after qa result: '+str(self.memory))
+
+    def update_model(self,
+                     llm,
+                     k=6,
+                     search_type='similarity',
+                     fetch_k=50,
+                     filter_arg=False):
 
         self.llm=llm
-                                                            search_kwargs=search_kwargs),
-                                                combine_docs_chain=self.doc_chain,
-                                                question_generator=self.question_generator,
-                                                memory=self.chat_history,
-                                                verbose=verbose,
-                                                return_source_documents=True,
-                                                return_generated_question=True,
-                                                )
+        self.k=k
+        self.search_type=search_type
+        self.fetch_k=fetch_k
+        self.filter_arg=filter_arg
+
+        # Define retriever search parameters
+        search_kwargs = _process_retriever_args(self.filter_arg,
+                                                self.sources,
+                                                self.search_type,
+                                                self.k,
+                                                self.fetch_k)
+        # Update conversational retrieval chain
+        self.conversational_qa_chain=_define_qa_chain(self.llm,
+                                                      self.retriever,
+                                                      self.memory,
+                                                      self.search_type,
+                                                      search_kwargs)
+        logging.info('Updated qa chain: '+str(self.conversational_qa_chain))
+
+# Internal functions
+def _combine_documents(docs,
+                       document_prompt=DEFAULT_DOCUMENT_PROMPT,
+                       document_separator='\n\n'):
+    '''
+    Combine a list of documents into a single string.
+    '''
+    # TODO: this would be where stuff, map reduce, etc. would go
+    doc_strings = [format_document(doc, document_prompt) for doc in docs]
+    return document_separator.join(doc_strings)
+def _define_qa_chain(llm,
+                     retriever,
+                     memory,
+                     search_type,
+                     search_kwargs):
+    '''
+    Define the conversational QA chain.
+    '''
+    # This adds a 'memory' key to the input object
+    loaded_memory = RunnablePassthrough.assign(
+        chat_history=RunnableLambda(memory.load_memory_variables)
+        | itemgetter('history'))
+    logging.info('Loaded memory: '+str(loaded_memory))
+
+    # Assemble main chain
+    standalone_question = {
+        'standalone_question': {
+            'question': lambda x: x['question'],
+            'chat_history': lambda x: get_buffer_string(x['chat_history'])}
+        | CONDENSE_QUESTION_PROMPT
+        | llm
+        | StrOutputParser()}
+    logging.info('Condense inputs as a standalone question: '+str(standalone_question))
+    retrieved_documents = {
+        'source_documents': itemgetter('standalone_question')
+        | retriever,
+        'question': lambda x: x['standalone_question']}
+    logging.info('Retrieved documents: '+str(retrieved_documents))
+    # Now we construct the inputs for the final prompt
+    final_inputs = {
+        'context': lambda x: _combine_documents(x['source_documents']),
+        'question': itemgetter('question')}
+    logging.info('Combined documents: '+str(final_inputs))
+    # And finally, we do the part that returns the answers
+    answer = {
+        'answer': final_inputs
+        | QA_PROMPT
+        | llm,
+        'references': itemgetter('source_documents')}
+    conversational_qa_chain = loaded_memory | standalone_question | retrieved_documents | answer
+    logging.info('Conversational QA chain: '+str(conversational_qa_chain))
+    return conversational_qa_chain
+def _process_retriever_args(filter_arg,
+                            sources,
+                            search_type,
+                            k,
+                            fetch_k):
+    '''
+    Process arguments for retriever.
+    '''
+    # Implement filter
+    if filter_arg:
+        filter_list = list(set(item['source'] for item in sources[-1]))
+        filter_items=[]
+        for item in filter_list:
+            filter_item={'source': item}
+            filter_items.append(filter_item)
+        filter={'$or':filter_items}
+    else:
+        filter=None
+
+    # Implement filtering and number of documents to return
+    if search_type=='mmr':
+        search_kwargs={'k':k,'fetch_k':fetch_k,'filter':filter} # See as_retriever docs for parameters
+    else:
+        search_kwargs={'k':k,'filter':filter} # See as_retriever docs for parameters
+
+    return search_kwargs
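For reference, a hypothetical end-to-end use of QA_Model as defined above. The LLM and index name below are placeholder assumptions (any LangChain chat model and an existing index would do), and the API keys are expected in the environment.

# Illustrative sketch only; not part of this commit.
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
import queries

qa = queries.QA_Model(index_type='ChromaDB',
                      index_name='ams-chromadb-openai',  # assumed index name
                      query_model=OpenAIEmbeddings(model='text-embedding-ada-002'),
                      llm=ChatOpenAI(model='gpt-3.5-turbo', temperature=0),  # assumed chat model
                      k=6,
                      search_type='similarity')
qa.query_docs('What are examples of adhesives to use when potting motors?')
print(qa.result['answer'].content)  # answer text with the appended 'Sources:' block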
setup.py
ADDED
@@ -0,0 +1,140 @@
import os
import logging
import json

import openai

import streamlit as st

# Set up the page, enable logging
from dotenv import load_dotenv,find_dotenv
load_dotenv(find_dotenv(),override=True)

def load_sidebar(config_file,
                 index_data_file,
                 vector_databases=False,
                 embeddings=False,
                 rag_type=False,
                 index_name=False,
                 llm=False,
                 model_options=False,
                 secret_keys=False):
    """
    Sets up the sidebar based on toggled options. Returns variables with options.
    """
    sb_out={}
    with open(config_file, 'r') as f:
        config = json.load(f)
        databases = {db['name']: db for db in config['databases']}
        llms = {m['name']: m for m in config['llms']}
        logging.info('Loaded: '+config_file)
    with open(index_data_file, 'r') as f:
        index_data = json.load(f)
        logging.info('Loaded: '+index_data_file)

    if vector_databases:
        # Vector databases
        st.sidebar.title('Vector database')
        sb_out['index_type']=st.sidebar.selectbox('Index type', list(databases.keys()), index=1)
        logging.info('Index type: '+sb_out['index_type'])

    if embeddings:
        # Embeddings
        st.sidebar.title('Embeddings')
        if sb_out['index_type']=='RAGatouille': # Default to selecting hugging face model for RAGatouille, otherwise select alternates
            sb_out['query_model']=st.sidebar.selectbox('Hugging face rag models', databases[sb_out['index_type']]['hf_rag_models'], index=0)
        else:
            sb_out['query_model']=st.sidebar.selectbox('Embedding models', databases[sb_out['index_type']]['embedding_models'], index=0)

        if sb_out['query_model']=='Openai':
            sb_out['embedding_name']='text-embedding-ada-002'
        elif sb_out['query_model']=='Voyage':
            sb_out['embedding_name']='voyage-02'
        logging.info('Query type: '+sb_out['query_model'])
        if 'embedding_name' in locals() or 'embedding_name' in globals():
            logging.info('Embedding name: '+sb_out['embedding_name'])
    if rag_type:
        if sb_out['index_type']!='RAGatouille': # RAGatouille doesn't have a rag_type
            # RAG Type
            st.sidebar.title('RAG Type')
            sb_out['rag_type']=st.sidebar.selectbox('RAG type', config['rag_types'], index=0)
            sb_out['smart_agent']=st.sidebar.checkbox('Smart agent?')
            logging.info('RAG type: '+sb_out['rag_type'])
            logging.info('Smart agent: '+str(sb_out['smart_agent']))
    if index_name:
        # Index Name
        st.sidebar.title('Index Name')
        sb_out['index_name']=index_data[sb_out['index_type']][sb_out['query_model']]
        st.sidebar.markdown('Index name: '+sb_out['index_name'])
        logging.info('Index name: '+sb_out['index_name'])
    if llm:
        # LLM
        st.sidebar.title('LLM')
        sb_out['llm_source']=st.sidebar.selectbox('LLM model', list(llms.keys()), index=0)
        logging.info('LLM source: '+sb_out['llm_source'])
        if sb_out['llm_source']=='OpenAI':
            sb_out['llm_model']=st.sidebar.selectbox('OpenAI model', llms[sb_out['llm_source']]['models'], index=0)
        if sb_out['llm_source']=='Hugging Face':
            sb_out['llm_model']=st.sidebar.selectbox('Hugging Face model', llms[sb_out['llm_source']]['models'], index=0)
    if model_options:
        # Add input fields in the sidebar
        st.sidebar.title('LLM Options')
        temperature = st.sidebar.slider('Temperature', min_value=0.0, max_value=2.0, value=0.0, step=0.1)
        output_level = st.sidebar.selectbox('Level of Output', ['Concise', 'Detailed'], index=1)

        st.sidebar.title('Retrieval Options')
        k = st.sidebar.number_input('Number of items per prompt', min_value=1, step=1, value=4)
        if sb_out['index_type']!='RAGatouille':
            search_type = st.sidebar.selectbox('Search Type', ['similarity', 'mmr'], index=0)
            sb_out['model_options']={'output_level':output_level,
                                     'k':k,
                                     'search_type':search_type,
                                     'temperature':temperature}
        else:
            sb_out['model_options']={'output_level':output_level,
                                     'k':k,
                                     'temperature':temperature}
        logging.info('Model options: '+str(sb_out['model_options']))
    if secret_keys:
        # Add a section for secret keys
        st.sidebar.title('Secret keys')
        st.sidebar.markdown('If .env file is in directory, will use that first.')
        sb_out['keys']={}
        if 'llm_source' in sb_out and sb_out['llm_source'] == 'OpenAI':
            sb_out['keys']['OPENAI_API_KEY'] = st.sidebar.text_input('OpenAI API Key', type='password')
        elif 'query_model' in sb_out and sb_out['query_model'] == 'Openai':
            sb_out['keys']['OPENAI_API_KEY'] = st.sidebar.text_input('OpenAI API Key', type='password')
        if 'llm_source' in sb_out and sb_out['llm_source']=='Hugging Face':
            sb_out['keys']['HUGGINGFACEHUB_API_TOKEN'] = st.sidebar.text_input('Hugging Face API Key', type='password')
        if 'query_model' in sb_out and sb_out['query_model']=='Voyage':
            sb_out['keys']['VOYAGE_API_KEY'] = st.sidebar.text_input('Voyage API Key', type='password')
        if 'index_type' in sb_out and sb_out['index_type']=='Pinecone':
            sb_out['keys']['PINECONE_API_KEY']=st.sidebar.text_input('Pinecone API Key',type='password')
    return sb_out
def set_secrets(sb):
    """
    Sets secrets from environment file, or from sidebar if not available.
    """
    secrets={}
    secrets['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
    openai.api_key = secrets['OPENAI_API_KEY']
    if not secrets['OPENAI_API_KEY']:
        secrets['OPENAI_API_KEY'] = sb['keys']['OPENAI_API_KEY']
        os.environ['OPENAI_API_KEY'] = secrets['OPENAI_API_KEY']
        openai.api_key = secrets['OPENAI_API_KEY']

    secrets['VOYAGE_API_KEY'] = os.getenv('VOYAGE_API_KEY')
    if not secrets['VOYAGE_API_KEY']:
        secrets['VOYAGE_API_KEY'] = sb['keys']['VOYAGE_API_KEY']
        os.environ['VOYAGE_API_KEY'] = secrets['VOYAGE_API_KEY']

    secrets['PINECONE_API_KEY'] = os.getenv('PINECONE_API_KEY')
    if not secrets['PINECONE_API_KEY']:
        secrets['PINECONE_API_KEY'] = sb['keys']['PINECONE_API_KEY']
        os.environ['PINECONE_API_KEY'] = secrets['PINECONE_API_KEY']

    secrets['HUGGINGFACEHUB_API_TOKEN'] = os.getenv('HUGGINGFACEHUB_API_TOKEN')
    if not secrets['HUGGINGFACEHUB_API_TOKEN']:
        secrets['HUGGINGFACEHUB_API_TOKEN'] = sb['keys']['HUGGINGFACEHUB_API_TOKEN']
        os.environ['HUGGINGFACEHUB_API_TOKEN'] = secrets['HUGGINGFACEHUB_API_TOKEN']
    return secrets
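For reference, an illustrative sketch of the shapes that load_sidebar() expects for config/config.json and config/index_data.json, inferred from the keys it reads above (databases, llms, rag_types, and index_data[index_type][query_model]). The actual files added in this commit may differ; all values below are placeholders.

# Illustrative config shapes only; model and index names are placeholders.
import json

example_config = {
    'databases': [
        {'name': 'ChromaDB', 'embedding_models': ['Openai', 'Voyage']},
        {'name': 'Pinecone', 'embedding_models': ['Openai', 'Voyage']},
        {'name': 'RAGatouille', 'hf_rag_models': ['colbert-ir/colbertv2.0']}],
    'llms': [
        {'name': 'OpenAI', 'models': ['gpt-3.5-turbo']},
        {'name': 'Hugging Face', 'models': ['mistralai/Mistral-7B-Instruct-v0.2']}],
    'rag_types': ['Standard']}
example_index_data = {'ChromaDB': {'Openai': 'ams-chromadb-openai'}}  # index_data[index_type][query_model]
print(json.dumps(example_config, indent=2))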