# app/engine/post_process.py
import reflex as rx
import json
import requests
from typing import Optional, List
from pydantic import BaseModel, Field

# from rerank import ReRanker

# https://hub.guardrailsai.com/validator/guardrails/toxic_language
from guardrails.hub import ToxicLanguage
from guardrails import Guard
# guardrails hub install hub://guardrails/detect_pii
from guardrails.hub import DetectPII
# https://hub.guardrailsai.com/validator/guardrails/qa_relevance_llm_eval
from guardrails.hub import QARelevanceLLMEval

import logging
logger = logging.getLogger("uvicorn")

from .summary import summarize_it

def IsPii(answer: str) -> bool:
    """Return True if `answer` contains PII (here: email addresses or phone numbers)."""
    guard = Guard().use(
        DetectPII,
        ["EMAIL_ADDRESS", "PHONE_NUMBER"],
        on_fail="exception",
    )
    try:
        guard.validate(answer)
        return False  # validation passed: no PII detected
    except Exception as e:
        logger.info(e)
        return True  # DetectPII raised, so the answer contains PII
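
# Usage sketch (hypothetical strings; requires the validator installed via
# `guardrails hub install hub://guardrails/detect_pii`):
#   IsPii("Write to jane.doe@example.com")  # -> True (email address detected)
#   IsPii("The sky is blue.")               # -> False
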
def IsToxic(query: str, threshold: float = 0.5) -> bool:
    """Return True if `query` contains toxic language.

    https://hub.guardrailsai.com/validator/guardrails/toxic_language
    """
    guard = Guard().use(
        ToxicLanguage,
        threshold=threshold,  # raise it to flag only strongly toxic sentences
        validation_method="sentence",
        on_fail="exception",
    )
    try:
        guard.validate(query)
        return False  # validation passed: nothing toxic detected
    except Exception as e:
        logger.info(e)  # the error message identifies the toxic sentence(s)
        return True
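
# Usage sketch (hypothetical strings; ToxicLanguage downloads a local
# classification model on first use):
#   IsToxic("Have a great day!")                 # -> False
#   IsToxic("Have a great day!", threshold=0.9)  # stricter: flags only highly toxic text
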
def IsRelevant(answer: str, query: str, model: str = "gpt-3.5-turbo") -> bool:
    """Return True if `answer` is relevant to `query`, as judged by an LLM."""
    guard = Guard().use(
        QARelevanceLLMEval,
        llm_callable=model,
        on_fail="exception",
    )
    try:
        guard.validate(
            answer,
            metadata={"original_prompt": query},
        )
        return True  # validation passed: answer judged relevant
    except Exception as e:
        logger.info(e)
        return False
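
if __name__ == "__main__":
    # Minimal smoke test, assuming the three hub validators are installed and
    # OPENAI_API_KEY is set (QARelevanceLLMEval calls the model named above).
    # The query/answer strings are made up for illustration. Because of the
    # relative import above, run as a module: `python -m app.engine.post_process`.
    query = "What is the capital of France?"
    answer = "The capital of France is Paris."

    if IsToxic(query):
        print("query rejected: toxic language")
    elif IsPii(answer):
        print("answer rejected: contains PII")
    elif not IsRelevant(answer, query):
        print("answer rejected: not relevant to the query")
    else:
        print("answer passed all post-processing checks")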