from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn

app = FastAPI()

# Client for the Hugging Face Inference API serving Mixtral-8x7B-Instruct.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")


class Item(BaseModel):
    prompt: str
    history: list  # list of [user_prompt, bot_response] pairs
    system_prompt: str
    temperature: float = 0.0
    max_new_tokens: int = 1048
    top_p: float = 0.15
    repetition_penalty: float = 1.0


def format_prompt(message, history):
    """Build a Mixtral-style [INST] prompt from the chat history."""
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt


async def generate_stream(item: Item):
    # The Inference API rejects non-positive temperatures when sampling,
    # so clamp to a small positive floor.
    temperature = max(float(item.temperature), 1e-2)
    top_p = float(item.top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=item.max_new_tokens,
        top_p=top_p,
        repetition_penalty=item.repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed for reproducible sampling
    )

    # Splice the system prompt in as a fake first exchange so it precedes
    # the user's actual message inside the [INST] template.
    formatted_prompt = format_prompt(
        f"{item.system_prompt} [/INST] Ok..! [INST] {item.prompt}", item.history
    )

    # details=True makes each streamed event carry token-level metadata,
    # so response.token.text is available below. Note that the stream is a
    # synchronous iterator, so this loop blocks the event loop between tokens.
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    for response in stream:
        yield response.token.text  # Stream each token as it's received


@app.post("/generate/")
async def generate_text(item: Item):
    return StreamingResponse(generate_stream(item), media_type="text/plain")
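

# A minimal entry point so the uvicorn import above actually serves the app.
# The host and port here are assumptions, not from the original; adjust them
# for your deployment.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)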
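

# Example client call (a sketch, not part of the server): it assumes the app
# is running locally on port 8000 as above. The payload fields mirror the
# Item model; history is a list of [user, assistant] pairs.
#
#   import requests
#
#   payload = {
#       "prompt": "What is the capital of France?",
#       "history": [],
#       "system_prompt": "You are a concise assistant.",
#   }
#   with requests.post(
#       "http://127.0.0.1:8000/generate/", json=payload, stream=True
#   ) as r:
#       for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
#           print(chunk, end="", flush=True)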