import json
import logging
from typing import List, Optional

import reflex as rx
import requests
from guardrails import Guard
from guardrails.hub import DetectPII, QARelevanceLLMEval, ToxicLanguage
from pydantic import BaseModel, Field

from .summary import summarize_it

# uvicorn's INFO-level log method, bound directly so it can be called as
# `logger("message")`.
logger = logging.getLogger("uvicorn").info

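# Note: the three hub validators used below must be installed before this
# module will import, e.g.:
#   guardrails hub install hub://guardrails/detect_pii
#   guardrails hub install hub://guardrails/toxic_language
#   guardrails hub install hub://guardrails/qa_relevance_llm_eval
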
def IsPii(answer: str) -> bool:
    """Return True if `answer` contains PII (email address or phone number)."""
    guard = Guard().use(
        DetectPII,
        ["EMAIL_ADDRESS", "PHONE_NUMBER"],
        "exception",
    )
    try:
        guard.validate(answer)
        return False  # validation passed: no PII detected
    except Exception as e:
        logger(e)
        return True  # validator raised: PII detected


def IsToxic(query: str, threshold: float = 0.5) -> bool:
    """Return True if `query` contains toxic language above `threshold`."""
    guard = Guard().use(
        ToxicLanguage,
        threshold=threshold,
        validation_method="sentence",
        on_fail="exception",
    )
    try:
        guard.validate(query)
        return False  # validation passed: no toxic language detected
    except Exception as e:
        logger(e)
        return True  # validator raised: toxic language detected


def IsRelevant(answer: str, query: str, model: str = "gpt-3.5-turbo") -> bool:
    """Return True if `answer` is relevant to `query`, as judged by an LLM."""
    guard = Guard().use(
        QARelevanceLLMEval,
        llm_callable=model,
        on_fail="exception",
    )
    try:
        guard.validate(
            answer,
            metadata={"original_prompt": query},
        )
        return True  # validation passed: answer judged relevant
    except Exception as e:
        logger(e)
        return False  # validator raised: answer judged irrelevant
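

if __name__ == "__main__":
    # Minimal smoke-test sketch; the sample strings below are hypothetical.
    # IsRelevant calls an OpenAI model through Guardrails, so OPENAI_API_KEY
    # must be set in the environment for that check to run.
    sample_query = "What is the capital of France?"
    sample_answer = "The capital of France is Paris. Email me at foo@bar.com."
    print("toxic:", IsToxic(sample_query))  # expected: False
    print("pii:", IsPii(sample_answer))  # expected: True (email address)
    print("relevant:", IsRelevant(sample_answer, sample_query))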