from transformers import Pipeline

from app.db_local_storage.in_memory_db import query_response_storage
from app.infrastructure.models.my_models import HybridSearchResponse, ModelResponse
from app.modules.hybridSearcher.hybridSearcher import HybridSearcher
from app.modules.questionAnswer.questionAnswer import QuestionAnswering


class QuerySearchFeature:
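    """Answers a user query by retrieving context with hybrid search and running question answering over it."""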

    def __init__(
        self,
        qa_pipeline: Pipeline,
        hybrid_searcher: HybridSearcher,
        question_answering: QuestionAnswering,
    ):
        self.qa_pipeline = qa_pipeline
        self.hybrid_searcher = hybrid_searcher
        self.question_answering = question_answering

    async def query_search(self, query: str) -> ModelResponse:
        """Answer a query: run hybrid search, build a context string, and return the model's response."""

        # Record the user's query in the in-memory chat history.
        query_response_storage.append(
            {
                "text": query,
                "isSender": True,
            }
        )

        # Retrieve relevant chunks and feed them as context to the QA model.
        result = self.hybrid_searcher.hybrid_search(query)
        context = self.get_and_join_context(result)
        model_response = self.question_answering.answer_question(query, context)

        # TODO: Manage memory for display messages
        query_response_storage.append(
            {
                "text": model_response,
                "isSender": False,
            }
        )

        return ModelResponse(
            text=model_response,
            isSender=False,
            message="success",
        )

    def get_and_join_context(self, search_result: HybridSearchResponse) -> str:
        """Join the chunk texts from the search hits into a single context string."""
        contexts = [point["chunk-text"] for point in search_result.data]
        return ", ".join(contexts)