mou3az committed on
Commit
51c7afb
1 Parent(s): 48d355a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -14
app.py CHANGED
@@ -1,25 +1,24 @@
1
- import PyPDF2
2
  import os
3
- from bs4 import BeautifulSoup
4
- import tempfile
5
  import csv
6
  import json
7
- import xml.etree.ElementTree as ET
8
  import docx
9
  import pptx
10
- import openpyxl
11
  import re
12
  import nltk
13
  import time
 
 
 
14
  import requests
15
  import gradio as gr
 
 
16
  from nltk.tokenize import word_tokenize
17
- from langchain_community.embeddings import SentenceTransformerEmbeddings
18
  from langchain_community.vectorstores import FAISS
19
- from langchain_community.llms import HuggingFaceEndpoint
20
- from langchain.schema import SystemMessage, HumanMessage, AIMessage
21
- from langchain_community.chat_models.huggingface import ChatHuggingFace
22
  from youtube_transcript_api import YouTubeTranscriptApi
 
 
 
23
  from youtube_transcript_api._errors import NoTranscriptFound, TranscriptsDisabled, VideoUnavailable
24
  nltk.download('punkt')
25
  nltk.download('omw-1.4')
@@ -203,7 +202,7 @@ def Chat_Message(history):
203
  message=HumanMessage(content=history[-1][0])
204
  messages.append(message)
205
  response = chat_model.invoke(messages)
206
- messages.append(response.content)
207
 
208
  if len(messages) >= 8:
209
  messages = messages[-8:]
@@ -239,7 +238,7 @@ def Web_Search(history):
239
  msg==HumanMessage(content=augmented_prompt)
240
  messages.append(msg)
241
  response = chat_model.invoke(msg)
242
- messages.append(response.content)
243
 
244
  if len(messages) >= 8:
245
  messages = messages[-8:]
@@ -269,7 +268,7 @@ def Chart_Generator(history):
269
  messages.append(prompt)
270
 
271
  res = chat_model.invoke(messages)
272
- messages.append(res.content)
273
 
274
  if len(messages) >= 8:
275
  messages = messages[-8:]
@@ -322,7 +321,7 @@ def Link_Scratch(history):
322
  message = HumanMessage(content=augmented_prompt)
323
  messages.append(message)
324
  response = chat_model.invoke(messages)
325
- messages.append(response.content)
326
 
327
  if len(messages) >= 1:
328
  messages = messages[-1:]
@@ -381,7 +380,7 @@ def File_Interact(history,filepath):
381
  message = HumanMessage(content=augmented_prompt)
382
  messages.append(message)
383
  response = chat_model.invoke(messages)
384
- messages.append(response.content)
385
 
386
  if len(messages) >= 1:
387
  messages = messages[-1:]
 
 
1
  import os
 
 
2
  import csv
3
  import json
 
4
  import docx
5
  import pptx
 
6
  import re
7
  import nltk
8
  import time
9
+ import PyPDF2
10
+ import tempfile
11
+ import openpyxl
12
  import requests
13
  import gradio as gr
14
+ from bs4 import BeautifulSoup
15
+ import xml.etree.ElementTree as ET
16
  from nltk.tokenize import word_tokenize
 
17
  from langchain_community.vectorstores import FAISS
 
 
 
18
  from youtube_transcript_api import YouTubeTranscriptApi
19
+ from langchain.schema import SystemMessage, HumanMessage, AIMessage
20
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
21
+ from langchain_community.embeddings import SentenceTransformerEmbeddings
22
  from youtube_transcript_api._errors import NoTranscriptFound, TranscriptsDisabled, VideoUnavailable
23
  nltk.download('punkt')
24
  nltk.download('omw-1.4')
 
202
  message=HumanMessage(content=history[-1][0])
203
  messages.append(message)
204
  response = chat_model.invoke(messages)
205
+ messages.append(AIMessage(content=response.content))
206
 
207
  if len(messages) >= 8:
208
  messages = messages[-8:]
 
238
  msg==HumanMessage(content=augmented_prompt)
239
  messages.append(msg)
240
  response = chat_model.invoke(msg)
241
+ messages.append(AIMessage(content=response.content))
242
 
243
  if len(messages) >= 8:
244
  messages = messages[-8:]
 
268
  messages.append(prompt)
269
 
270
  res = chat_model.invoke(messages)
271
+ messages.append(AIMessage(content=response.content))
272
 
273
  if len(messages) >= 8:
274
  messages = messages[-8:]
 
321
  message = HumanMessage(content=augmented_prompt)
322
  messages.append(message)
323
  response = chat_model.invoke(messages)
324
+ messages.append(AIMessage(content=response.content))
325
 
326
  if len(messages) >= 1:
327
  messages = messages[-1:]
 
380
  message = HumanMessage(content=augmented_prompt)
381
  messages.append(message)
382
  response = chat_model.invoke(messages)
383
+ messages.append(AIMessage(content=response.content))
384
 
385
  if len(messages) >= 1:
386
  messages = messages[-1:]