import reflex as rx
import json
import requests
from typing import Optional, List
from pydantic import BaseModel, Field
# from rerank import ReRanker
# https://hub.guardrailsai.com/validator/guardrails/toxic_language
from guardrails.hub import ToxicLanguage
from guardrails import Guard
# guardrails hub install hub://guardrails/detect_pii
from guardrails.hub import DetectPII
# https://hub.guardrailsai.com/validator/guardrails/qa_relevance_llm_eval
from guardrails.hub import QARelevanceLLMEval
import logging

# `logger` is a shortcut to the uvicorn logger's `info` method;
# call logger("message") to log at INFO level.
logger = logging.getLogger("uvicorn").info
from .summary import summarize_it


def IsPii(answer: str) -> bool:
    """Return True if `answer` contains PII (email addresses or phone numbers)."""
    guard = Guard().use(
        DetectPII,
        ["EMAIL_ADDRESS", "PHONE_NUMBER"],
        "exception",
    )
    try:
        guard.validate(answer)
        # Validation passed: no PII detected.
        return False
    except Exception as e:
        print(e)
        # DetectPII raised with on_fail="exception": the answer contains PII.
        return True


def IsToxic(query: str, threshold: float = 0.5) -> bool:
    """Return True if `query` contains toxic language."""
    # https://hub.guardrailsai.com/validator/guardrails/toxic_language
    guard = Guard().use(
        ToxicLanguage,
        threshold=threshold,  # raise the threshold to flag only highly toxic text
        validation_method="sentence",
        on_fail="exception",
    )
    try:
        guard.validate(query)
        # Validation passed: no toxic language detected.
        return False
    except Exception as e:
        print(e)  # logs the offending (toxic) query
        return True


def IsRelevant(answer: str, query: str, model: str = "gpt-3.5-turbo") -> bool:
    """Return True if `answer` is judged relevant to the original `query` by an LLM."""
    guard = Guard().use(
        QARelevanceLLMEval,
        llm_callable=model,
        on_fail="exception",
    )
    try:
        guard.validate(
            answer,
            metadata={"original_prompt": query},
        )
        # Validation passed: the answer is relevant to the query.
        return True
    except Exception as e:
        print(e)
        return False
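

# --- Usage sketch (illustrative addition, not part of the original logic) ---
# A minimal example of how these checks might be chained before returning an
# answer. The function name, argument names, and fallback messages below are
# hypothetical placeholders chosen for illustration.
def guarded_answer(query: str, answer: str) -> str:
    """Run the toxicity, PII, and relevance guards before releasing `answer`."""
    if IsToxic(query):
        return "Sorry, I can't respond to that question."
    if IsPii(answer):
        return "Sorry, the answer contained personal information and was withheld."
    if not IsRelevant(answer, query):
        return "Sorry, I couldn't produce a relevant answer."
    return answer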