import streamlit as st
from haystack import Pipeline
from haystack_integrations.document_stores.pinecone import PineconeDocumentStore
from haystack.components.builders.answer_builder import AnswerBuilder
from haystack.components.builders.prompt_builder import PromptBuilder
from haystack_integrations.components.embedders.cohere import CohereTextEmbedder
from haystack_integrations.components.retrievers.pinecone import PineconeEmbeddingRetriever
from haystack_integrations.components.generators.cohere import CohereGenerator
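

# A RAG support-agent pipeline: embed the customer's message with Cohere,
# retrieve matching documents from Pinecone, and generate a grounded reply
# with a Cohere LLM.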
def start_haystack(openai_key):
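    # NOTE: openai_key is unused here; the Pinecone and Cohere components
    # presumably read their credentials from environment variables
    # (PINECONE_API_KEY, COHERE_API_KEY), as the integrations do by default.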
    # dimension=1024 matches the output size of Cohere's embed-english-v3.0.
    document_store = PineconeDocumentStore(dimension=1024, index="zen", environment="gcp-starter")
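    # Jinja2 template rendered by PromptBuilder: the retrieved documents plus
    # the customer's message become the LLM prompt.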
template = """
You are a support agent replying to customers' messages. Use the context to answer the customer, starting by greeting them and ending with goodbyes.
DO NOT TRY TO GUESS INFORMATION. If the context doesn't provide you with the answer, ONLY say this: [].
Context:
{% for document in documents %}
{{ document.content }}
{% endfor %}
Customer's message: {{ query }}?
"""
    st.session_state["haystack_started"] = True

    pipe = Pipeline()
    pipe.add_component("text_embedder", CohereTextEmbedder(model="embed-english-v3.0"))
    pipe.add_component("retriever", PineconeEmbeddingRetriever(document_store=document_store, top_k=3))
    pipe.add_component("prompt_builder", PromptBuilder(template=template))
    pipe.add_component("llm", CohereGenerator(model="command-nightly"))
    pipe.add_component("answer_builder", AnswerBuilder())
pipe.connect("text_embedder.embedding", "retriever.query_embedding")
pipe.connect("retriever", "prompt_builder.documents")
pipe.connect("prompt_builder", "llm")
pipe.connect("llm.replies", "answer_builder.replies")
pipe.connect("llm.meta", "answer_builder.meta")
pipe.connect("retriever", "answer_builder.documents")
return pipe
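

# The leading underscore on _pipe tells st.cache_data not to hash that
# argument, so answers are cached per prompt while the (unhashable) pipeline
# object is passed through untouched.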
@st.cache_data(show_spinner=True)
def query(prompt, _pipe):
    with st.spinner('Processing'):
        try:
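            # Each top-level key in the inputs dict addresses a pipeline
            # component by name; all three receive the raw prompt.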
            replies = _pipe.run({
                "text_embedder": {"text": prompt},
                "prompt_builder": {"query": prompt},
                "answer_builder": {"query": prompt},
            })
            raw = replies['answer_builder']['answers'][0]
            print("Raw:")
            print(raw)
            # Append the top retrieved document as a visible source citation.
            result = raw.data + "\n\n -- Source: " + raw.documents[0].content + " --"
            print("Result:")
            print(result)
            st.success('Completed!')
        except Exception as e:
            print("Haystack error:")
            print(e)
            # Return a plain string so the caller gets the same type as on success.
            result = "Something went wrong!"
            st.error('Failed!')
    return result