File size: 2,443 Bytes
2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 8f23b65 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 7b2204c 2df6947 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 |
from haystack.telemetry import tutorial_running
import logging
from haystack.document_stores import InMemoryDocumentStore
from haystack.pipelines.standard_pipelines import TextIndexingPipeline
from haystack.nodes import BM25Retriever
from haystack.nodes import FARMReader
from haystack.pipelines import ExtractiveQAPipeline
from pprint import pprint
from haystack.utils import print_answers
from haystack.nodes import EmbeddingRetriever
import codecs
from haystack.pipelines import FAQPipeline
from haystack.utils import print_answers
import logging
from haystack.telemetry import tutorial_running
from haystack.document_stores import InMemoryDocumentStore
from haystack.nodes import EmbeddingRetriever
import pandas as pd
from haystack.pipelines import FAQPipeline
from haystack.utils import print_answers
# Report tutorial usage to Haystack's telemetry (6 = the FAQ-style QA tutorial).
tutorial_running(6)
# Silence everything at WARNING, but keep Haystack's own INFO-level messages.
logging.basicConfig(format="%(levelname)s - %(name)s - %(message)s", level=logging.WARNING)
logging.getLogger("haystack").setLevel(logging.INFO)
# In-memory store that will hold the FAQ question/answer documents.
document_store = InMemoryDocumentStore()
# Parse faq.txt into [question, answer] pairs and load them into a DataFrame.
# The index arithmetic below is hard-wired to one specific file layout
# (separator lines interleaved, exactly 33/34 useful lines) — TODO(review):
# confirm against the actual faq.txt; it will break on any other layout.
# Fix vs. original: the file handle was opened with codecs.open() and never
# closed — use a context manager so it is released even on error.
with codecs.open('faq.txt', 'r', 'UTF-8') as f:
    line = f.readlines()
lines = []
# Drop the separator lines. Popping at ascending indices on a shrinking list
# deliberately skips ahead after each removal (original behavior preserved).
for i in range(2, 33, 2):
    line.pop(i)
# Strip the trailing two characters (presumably "\r\n") from each kept line.
for i in range(33):
    line[i] = line[i][:-2]
# Group consecutive lines into [question, answer] pairs.
for i in range(0, 33, 2):
    lines.append([line[i], line[i + 1]])
colu = ['question', 'answer']
df = pd.DataFrame(data=lines, columns=colu)
# Dense retriever that embeds text with a MiniLM sentence-transformer.
# scale_score=False returns raw similarity scores instead of rescaling
# them into [0, 1]; use_gpu=True assumes a CUDA device is available —
# TODO(review): confirm, otherwise flip to False.
retriever = EmbeddingRetriever(
    document_store=document_store,
    embedding_model="sentence-transformers/all-MiniLM-L6-v2",
    use_gpu=True,
    scale_score=False,
)
# Embed every FAQ question and index the rows into the document store.
# Fix vs. original: the statements were in the wrong order —
#   (1) `question` was used before it was defined (NameError), and
#   (2) df['question'] was read AFTER the column had already been renamed
#       to 'content' (KeyError).
# Correct order: extract questions, embed them, then rename for indexing.
question = list(df['question'].values)
df['embedding'] = retriever.embed_queries(queries=question).tolist()
# FAQPipeline matches incoming queries against the 'content' field, so the
# question column must be renamed before writing the documents.
df = df.rename(columns={'question': 'content'})
docs_to_index = df.to_dict(orient='records')
document_store.write_documents(docs_to_index)
def haysstack(input, retriever=retriever):
    """Answer one user question through the FAQ pipeline.

    Parameters
    ----------
    input : str
        The user's question (Gradio passes the textbox value positionally).
    retriever : EmbeddingRetriever
        Bound once at definition time to the module-level retriever.

    Returns
    -------
    str
        Text of the best-matching FAQ answer, or "" when nothing matched.
    """
    pipe = FAQPipeline(retriever=retriever)
    # top_k=1: retrieve only the single best-matching FAQ entry.
    prediction = pipe.run(query=input, params={"Retriever": {"top_k": 1}})
    # Fix vs. original: prediction['answers'] is a list of Answer objects,
    # not a string — calling .split(',') on it raised AttributeError.
    # Return the answer text itself, which is what the Textbox output needs.
    answers = prediction.get('answers', [])
    return answers[0].answer if answers else ""
# Run any question and change top_k to see more or less answers
# Build and launch the Gradio web UI; share=True exposes a public URL.
import gradio as gr
from gradio.components import Textbox
# Input label: "请输入你的问题" ("Please enter your question").
inputs = Textbox(lines=7, label="请输入你的问题")
# Output label: "来自智能客服的回答" ("Answer from the smart customer service").
outputs = Textbox(lines=7, label="来自智能客服的回答")
# Title "电商客服" ("e-commerce customer service"); every submitted question
# is routed through `haysstack` defined above.
gr.Interface(fn=haysstack, inputs=inputs, outputs=outputs, title="电商客服",
description="我是您的电商客服,您可以问任何你想知道的问题",
theme=gr.themes.Default()).launch(share=True)
|