import os
from operator import itemgetter

import chainlit as cl
import tiktoken
from dotenv import load_dotenv
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import Pinecone
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.schema.runnable.config import RunnableConfig
from langchain.text_splitter import RecursiveCharacterTextSplitter

load_dotenv()

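# Prompt template for the RAG chain: retrieved manual excerpts fill {context}
# and the user's question fills {question}.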
RAG_PROMPT = """
CONTEXT:
{context}

QUERY:
{question}

You are a car specialist and may only answer from the context above.

Do not mention in your response that the answer comes from the context.
"""

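# Default generation settings; not currently passed to the ChatOpenAI instance below.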
init_settings = {
    "model": "gpt-3.5-turbo",
    "temperature": 0,
    "max_tokens": 500,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}

def tiktoken_len(text):
    """Return the number of gpt-3.5-turbo tokens in `text`, used to size chunks."""
    tokens = tiktoken.encoding_for_model("gpt-3.5-turbo").encode(text)
    return len(tokens)

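# Load the car manual PDF; the source path/URL comes from the `pdfurl` env var.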
car_manual = PyMuPDFLoader(os.environ.get('pdfurl'))

car_manual_data = car_manual.load()

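# Split the manual into ~400-token chunks with a 50-token overlap (token counts via tiktoken_len).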
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=400,
    chunk_overlap=50,
    length_function=tiktoken_len,
)

car_manual_chunks = text_splitter.split_documents(car_manual_data)

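# Embed the chunks and index them in the Pinecone index named by the `index`
# env var, then expose the vector store as a retriever for the chain.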
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
vector_store = Pinecone.from_documents(
    car_manual_chunks, embedding_model, index_name=os.environ.get('index')
)
retriever = vector_store.as_retriever()

rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)

model = ChatOpenAI(model="gpt-3.5-turbo")



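# On chat start, assemble the retrieval QA chain and store it in the user session.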
@cl.on_chat_start
async def main():
    # Map the incoming question to retrieved context, fill the prompt,
    # call the chat model, and parse the output to a plain string.
    mechanic_qa_chain = (
        {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
        | rag_prompt
        | model
        | StrOutputParser()
    )

    cl.user_session.set("runnable", mechanic_qa_chain)



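# Stream the chain's answer back to the Chainlit UI token by token.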
@cl.on_message
async def on_message(message: cl.Message):
    runnable = cl.user_session.get("runnable")
    msg = cl.Message(content="")

    async for chunk in runnable.astream(
        {"question": message.content},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)

    # Finalize the streamed message so it is persisted in the chat.
    await msg.send()