import os

import requests
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_huggingface import HuggingFacePipeline
from serpapi import GoogleSearch
from transformers import pipeline



# Read the SerpAPI key from the environment rather than hard-coding a secret.
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]

params = {
    "engine": "google",
    "q": "ESPN",
    "api_key": SERPAPI_API_KEY,
}

search = GoogleSearch(params)
results = search.get_dict()
organic_results = results["organic_results"]
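
# Illustrative check (assumption: the query returns at least one hit) — print
# the top organic result to confirm the response has the expected shape.
if organic_results:
    print(organic_results[0].get("title"), "->", organic_results[0].get("link"))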

# flan-t5-xl runs locally through a transformers pipeline, so no Hub API token
# is required; temperature only takes effect when sampling is enabled.
llm = HuggingFacePipeline(
    pipeline=pipeline(
        "text2text-generation",
        model="google/flan-t5-xl",
        do_sample=True,
        temperature=0.5,
    )
)
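
# Sanity check (illustrative): HuggingFacePipeline is a LangChain Runnable, so
# a single prompt can be run through flan-t5-xl directly. Left commented out
# because the call loads a ~3B-parameter model.
# print(llm.invoke("Answer in one word: what sport does ESPN mainly cover?"))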

# SerpAPI for web search plus the LLM-backed math tool.
tools = load_tools(["serpapi", "llm-math"], llm=llm, serpapi_api_key=SERPAPI_API_KEY)

# Standalone NLI classifier for ad-hoc fact checks (RefereeAgent below
# constructs its own copy).
fact_checking_pipeline = pipeline(
    "text-classification",
    model="typeform/distilbert-base-uncased-mnli",
)


# Configure the LangChain agent.
agent_chain = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
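
# Example run (illustrative): a question that exercises both tools — SerpAPI
# for the lookup, llm-math for the arithmetic. Output depends on live results.
# agent_chain.run("In what year was ESPN founded, and what is that year plus 10?")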

class RefereeAgent:
    def __init__(self):
        self.serpapi_api_key = SERPAPI_API_KEY
        self.fact_checking_pipeline = pipeline(
            "text-classification",
            model="typeform/distilbert-base-uncased-mnli",
        )

    def search_serpapi(self, query):
        search_url = "https://serpapi.com/search"
        params = {
            "q": query,
            "api_key": self.serpapi_api_key,
        }
        response = requests.get(search_url, params=params, timeout=30)
        response.raise_for_status()
        return response.json()

    def extract_relevant_info(self, serpapi_results):
        relevant_info = []
        for result in serpapi_results.get('organic_results', []):
            title = result.get('title')
            snippet = result.get('snippet')
            link = result.get('link')
            relevant_info.append({'title': title, 'snippet': snippet, 'link': link})
        return relevant_info

    def fact_check(self, statement):
        serpapi_results = self.search_serpapi(statement)
        relevant_info = self.extract_relevant_info(serpapi_results)

        if not relevant_info:
            return "No relevant data found."

        # NLI convention: the gathered evidence is the premise and the claim
        # under test is the hypothesis; skip results without a snippet.
        context = " ".join(info['snippet'] for info in relevant_info if info['snippet'])
        result = self.fact_checking_pipeline(
            {"text": context, "text_pair": statement}, truncation=True
        )

        label = result[0]['label']
        score = result[0]['score']

        if label == "ENTAILMENT":
            return f"The statement is likely true. Confidence score: {score}"
        elif label == "CONTRADICTION":
            return f"The statement is likely false. Confidence score: {score}"
        return f"The evidence is inconclusive. Confidence score: {score}"