Spaces:
Sleeping
Sleeping
Upload 35 files
Browse files
app_drive.py
CHANGED
@@ -11,8 +11,9 @@ from driveapi.drive_database import create_chroma_db
|
|
11 |
|
12 |
# global time_diff, model_name, search_type
|
13 |
time_diff = 0
|
14 |
-
|
15 |
-
model_name = "gpt-4-1106-preview"
|
|
|
16 |
search_type = "stuff"
|
17 |
input_question = ""
|
18 |
model_response = ""
|
@@ -83,12 +84,15 @@ def save_feedback(feedback):
|
|
83 |
["Question", "Response", "Model", "Time", "Feedback"],
|
84 |
[input_question, model_response, model_name, time_diff, user_feedback]
|
85 |
]
|
86 |
-
|
87 |
-
if user_feedback != "
|
88 |
upload_chat_to_drive(log_data, file_name)
|
89 |
|
90 |
def default_feedback():
|
91 |
-
return "
|
|
|
|
|
|
|
92 |
|
93 |
def text_feedback(feedback):
|
94 |
global text_feedback
|
@@ -145,10 +149,10 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="emerald", neutral_hue="slate"))
|
|
145 |
with gr.Row():
|
146 |
with gr.Column():
|
147 |
feedback_radio = gr.Radio(
|
148 |
-
choices=["1", "2", "3", "4", "5", "6", "
|
149 |
-
value=["
|
150 |
label="How would you rate the current response?",
|
151 |
-
info="Choosing a number sends the following diagnostic data to the developer - Question, Response, Time Taken. Let it be
|
152 |
)
|
153 |
|
154 |
with gr.Column():
|
@@ -157,7 +161,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="emerald", neutral_hue="slate"))
|
|
157 |
|
158 |
msg.submit(respond, [msg, chatbot], [msg, chatbot])
|
159 |
msg.submit(default_feedback, outputs=[feedback_radio])
|
160 |
-
|
161 |
|
162 |
feedback_radio.change(
|
163 |
fn=save_feedback,
|
|
|
11 |
|
12 |
# global time_diff, model_name, search_type
|
13 |
time_diff = 0
|
14 |
+
model_name="gpt-3.5-turbo-1106"
|
15 |
+
# model_name = "gpt-4-1106-preview"
|
16 |
+
# model_name = "gpt-4-0125-preview"
|
17 |
search_type = "stuff"
|
18 |
input_question = ""
|
19 |
model_response = ""
|
|
|
84 |
["Question", "Response", "Model", "Time", "Feedback"],
|
85 |
[input_question, model_response, model_name, time_diff, user_feedback]
|
86 |
]
|
87 |
+
|
88 |
+
if user_feedback[0] != "None":
|
89 |
upload_chat_to_drive(log_data, file_name)
|
90 |
|
91 |
def default_feedback():
|
92 |
+
return "None"
|
93 |
+
|
94 |
+
def default_text():
|
95 |
+
return ""
|
96 |
|
97 |
def text_feedback(feedback):
|
98 |
global text_feedback
|
|
|
149 |
with gr.Row():
|
150 |
with gr.Column():
|
151 |
feedback_radio = gr.Radio(
|
152 |
+
choices=["1", "2", "3", "4", "5", "6", "None"],
|
153 |
+
value=["None"],
|
154 |
label="How would you rate the current response?",
|
155 |
+
info="Choosing a number sends the following diagnostic data to the developer - Question, Response, Time Taken. Let it be [None] to not send any data.",
|
156 |
)
|
157 |
|
158 |
with gr.Column():
|
|
|
161 |
|
162 |
msg.submit(respond, [msg, chatbot], [msg, chatbot])
|
163 |
msg.submit(default_feedback, outputs=[feedback_radio])
|
164 |
+
msg.submit(default_text, outputs=[feedback_text])
|
165 |
|
166 |
feedback_radio.change(
|
167 |
fn=save_feedback,
|
driveapi/service.py
CHANGED
@@ -6,6 +6,7 @@ def get_credentials():
|
|
6 |
private_key = os.environ.get('GOOGLE_PRIVATE_KEY').replace('\\n', '\n')
|
7 |
token_uri = os.environ.get('GOOGLE_TOKEN_URI')
|
8 |
|
|
|
9 |
# Create credentials object
|
10 |
credentials_info = {
|
11 |
'type': 'service_account',
|
|
|
6 |
private_key = os.environ.get('GOOGLE_PRIVATE_KEY').replace('\\n', '\n')
|
7 |
token_uri = os.environ.get('GOOGLE_TOKEN_URI')
|
8 |
|
9 |
+
|
10 |
# Create credentials object
|
11 |
credentials_info = {
|
12 |
'type': 'service_account',
|
lc_base/README_BASE.md
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# ChatLLMs
|
2 |
+
Gradio Interface for LLM-Powered PDF Chats
|
3 |
+
|
4 |
+
This chatbot is designed to provide intelligent responses and answers to questions based on the content of PDF documents. It leverages [Gradio](https://www.gradio.app/) as a user-friendly interface to engage with chatbots powered by [OpenAI](https://openai.com/) models based on [langchain](https://www.langchain.com/). Additionally, it incorporates [ChromaDB](https://www.trychroma.com/) for efficient data storage.
|
5 |
+
|
6 |
+
Current LLM used - GPT4-1106-preview
|
7 |
+
|
8 |
+
A base interface demo is available on this [HF space](https://huggingface.co/spaces/Koshti10/Chat_literature) for testing
|
9 |
+
|
10 |
+
## Getting started
|
11 |
+
|
12 |
+
Clone this repository and add your OpenAI API key to your local environment
|
13 |
+
|
14 |
+
```bash
|
15 |
+
git clone https://github.com/kushal-10/chatllms
|
16 |
+
cd chatllms
|
17 |
+
export OPENAI_API_KEY=<your secret key>
|
18 |
+
```
|
19 |
+
|
20 |
+
Install required dependencies
|
21 |
+
```bash
|
22 |
+
pip install -r requirements.txt
|
23 |
+
```
|
24 |
+
|
25 |
+
## Usage
|
26 |
+
|
27 |
+
### Chatting over all the given documents, using stuff to iterate over 100 most relevant documents
|
28 |
+
|
29 |
+
Step 1:
|
30 |
+
|
31 |
+
Create a new folder under `inputs`, for example `new_docs`, and add your PDFs here.
|
32 |
+
|
33 |
+
Step 2:
|
34 |
+
Specify this as `inp_dir` in `save_db.py` and additionally specify where you would like the Chroma database to be stored in `out_dir`. Then run
|
35 |
+
|
36 |
+
```bash
|
37 |
+
python3 lc_base/save_db.py
|
38 |
+
```
|
39 |
+
|
40 |
+
Step 3:
|
41 |
+
Specify the `out_dir` in `app.py` along with additional parameters and then run `app.py` to run the gradio interface locally.
|
42 |
+
```
|
43 |
+
python3 app.py
|
44 |
+
```
|
45 |
+
|
46 |
+
Add the API key and chat away!!
|
47 |
+
|
48 |
+
### Chatting over summaries of all given documents using map_reduce.
|
49 |
+
|
50 |
+
Step 1:
|
51 |
+
|
52 |
+
Create a new folder under `inputs`, for example `new_docs`, and add your PDFs here.
|
53 |
+
|
54 |
+
Step 2:
|
55 |
+
Specify this as `input_dir` in `main.py` and additionally specify in which folder you would like the individual Chroma databases to be stored in `output_dir`. Also specify where you would like to save the combined database of summaries. Change other params if required. Then run
|
56 |
+
|
57 |
+
```bash
|
58 |
+
python3 main.py
|
59 |
+
```
|
60 |
+
|
61 |
+
Step 3:
|
62 |
+
Specify the `output_dir` in `app.py` along with additional parameters and then run `app.py` to run the gradio interface locally.
|
63 |
+
```
|
64 |
+
python3 app.py
|
65 |
+
```
|
66 |
+
|
67 |
+
Add the API key and chat away!!
|
68 |
+
|
69 |
+
All responses will be saved as CSV files under the `logs` folder.
|
70 |
+
|
71 |
+
|
72 |
+
|
73 |
+
|
74 |
+
|
75 |
+
|
76 |
+
|
77 |
+
|
78 |
+
|
outputs/combined/policy_eu_asia_usa/faiss_index/index.faiss
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 7391277
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:02401a46a0fd138933c3b84114cf8c193444181edf55bb9a4cb4431bc039351d
|
3 |
size 7391277
|
outputs/combined/policy_eu_asia_usa/faiss_index/index.pkl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 1285422
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:970472b3e8de040d6d6136dc8658bf6d94b23c30a581f28a541d79458b6f6aa8
|
3 |
size 1285422
|
requirements.txt
CHANGED
@@ -7,4 +7,4 @@ openai==0.28.1
|
|
7 |
langchain==0.0.331
|
8 |
google-auth
|
9 |
google-auth-httplib2
|
10 |
-
google-api-python-client
|
|
|
7 |
langchain==0.0.331
|
8 |
google-auth
|
9 |
google-auth-httplib2
|
10 |
+
google-api-python-client
|