# NOTE(review): removed file-viewer scrape residue (page status text, file size,
# commit hashes, and a line-number gutter) that was not part of the source and
# made the module unparsable as Python.
from fastapi import APIRouter, Depends, HTTPException, Body
from ..dependencies import get_current_user
from ..utils.chat_rag import llm_infer
from ..utils.chat_rag import sanitize_collection_name
from typing import Any
# Module-level router for user chat endpoints; mounted by the main application.
router = APIRouter()
@router.post("/user/chat")
async def chat_with_llama(user_input: str = Body(..., embed=True), current_user: Any = Depends(get_current_user)):
    """Run LLM inference against the authenticated user's collection.

    Args:
        user_input: The user's chat prompt, read from an embedded JSON body key.
        current_user: Auth payload injected by ``get_current_user``; assumed to
            contain "user_id", "name", and "role" keys — TODO confirm schema
            against the dependency.

    Returns:
        A dict with the model's response plus the caller's identity fields.

    Raises:
        HTTPException: 500 wrapping any failure during lookup or inference.
    """
    try:
        user_id = current_user["user_id"]
        # The collection name is derived from the user id and sanitized —
        # presumably to satisfy the vector store's naming constraints; verify
        # against sanitize_collection_name.
        model_response = llm_infer(
            user_collection_name=sanitize_collection_name(user_id),
            prompt=user_input,
        )
        # TODO(review): optionally persist chat history, e.g.:
        # chromadb_face_helper.store_chat_history(user_id=user_id, user_input=user_input, model_response=model_response)
    except Exception as e:
        # Boundary handler: surface any failure as a 500; chain the original
        # exception so the cause survives in server logs/tracebacks.
        raise HTTPException(status_code=500, detail=str(e)) from e
    return {
        "ai_response": model_response,
        "user_id": user_id,  # reuse the value fetched above instead of a second lookup
        "name": current_user["name"],
        "role": current_user["role"],
    }